]+>([^<]+)<.*?
.*?>(.*?).*?' \\\r\n 'src=\"([^\"]+)\".*?]+>([^<]+)<.*?]*>' \\\r\n '(.*?).*?src=\"([^\"]+)\".*?]+>([^<]+)<'\r\n matches = find_multiple_matches(bloque, patron)\r\n for hora, estado, t1thumb, team1, url, matchid, score, t2thumb, team2 in matches:\r\n h, m = hora.split(\":\")\r\n time_match = datetime.datetime(now.year, now.month, now.day, int(h), int(m))\r\n estado = htmlclean(estado).replace(\"\\xc2\\xa0\", \"\")\r\n t1thumb = t1thumb.rsplit(\"?\", 1)[0].replace(\"/small/\", \"/original/\")\r\n t2thumb = t2thumb.rsplit(\"?\", 1)[0].replace(\"/small/\", \"/original/\")\r\n if \"chk_hour\" in score or \"Aplazado\" in estado:\r\n score = \"-- : --\"\r\n score = htmlclean(score)\r\n canal = find_single_match(bloque, ' | \\s*\\s*([^<]+)').replace(\"Partidos \", \"\")\r\n partidos.append({\"next\": next, \"prev\": prev, \"today\": today})\r\n\r\n return partidos\r\n\r\ndef get_minutos(url):\r\n data = httptools.downloadpage(url, cookies=False).data\r\n data = re.sub(r\"\\n|\\r|\\t\", '', data)\r\n minuto = find_single_match(data, '([^<]+)<').replace(\"DIRECTO (\", \"\").replace(\")\", \"\")\r\n\r\n return minuto\r\n\r\ndef refresh_score():\r\n from core import jsontools\r\n try:\r\n data = httptools.downloadpage(\"http://www.resultados-futbol.com/ajax/refresh_live.php\").data\r\n data = jsontools.load_json(data)\r\n if not data:\r\n data = {}\r\n return data\r\n except:\r\n return {}\r\n\r\ndef get_info(url, reload=False):\r\n data = httptools.downloadpage(url, cookies=False).data\r\n data = re.sub(r\"\\n|\\r|\\t\", '', data)\r\n \r\n jornada = find_single_match(data, ' | (.*?)')\r\n patron = '
(\\d+).*? | ([^<]*)<.*?>([^<]+).*? | ([^<]+) | .*?' \\\r\n '
([^<]+) | .*?
([^<]*)<.*? | ([^<]*)<.*?' \\\r\n ' | ([^<]*)<.*? | ([^<]*)<.*? | ([^<]*)<.*? | ([^<]*)<'\r\n stats = find_multiple_matches(bloque, patron)\r\n for st in stats:\r\n driver['stats'][st[0]] = {'pos': st[1], 'equipo': st[2], 'chasis': st[3], 'motor': st[4], \r\n 'neuma': st[5], 'vict': st[6].strip(), 'poles': st[7].strip(),\r\n 'vr': st[8].strip(), 'puntos': st[9].strip(), 'gps': st[10].strip()}\r\n\r\n driver['news'] = find_single_match(data, '.*?href=\"([^\"]+)\"')\r\n\r\n return driver\r\n\r\n\r\ndef get_driver_news(url):\r\n data = httptools.downloadpage(\"http://soymotor.com%s\" % url).data\r\n\r\n news = []\r\n matches = find_multiple_matches(data, ' .*?href=\"([^\"]+)\".*?title=\"([^\"]+)\"' \\\r\n '.*?src=\"([^\"]+)\".*?\\s*\\|\\s*(.*?)<')\r\n for url, title, img, fecha in matches:\r\n img = img.replace(\"small_video\", \"large\")\r\n title = decodeHtmlentities(title)\r\n news.append([url, title, img, fecha])\r\n\r\n next = find_single_match(data, 'href=\"([^\"]+)\">siguiente')\r\n\r\n return news, next\r\n \r\n\r\ndef get_content_news(url):\r\n data = httptools.downloadpage(url).data\r\n\r\n ante = find_single_match(data, '(.*?) ([^<]+)')\r\n cuerpo = find_single_match(data, '(.*?) ')\r\n cuerpo = cuerpo.replace('', '[B]').replace('', '[/B]').replace(\"\", \"\\n\").replace(\" \", \"\\n\")\r\n cuerpo = htmlclean(cuerpo)\r\n\r\n noti = \"[COLOR gray]%s[/COLOR]\\n[B]%s[/B]\\n%s\" % (ante, \"\\n\".join(sub), cuerpo)\r\n noti = re.sub(r' {2,}', '', noti)\r\n noti = re.sub(r'\\n{3,}', '\\n\\n', noti)\r\n\r\n return noti\r\n\r\n\r\ndef get_data_pilotof1(data, piloto, data_driver):\r\n if not data:\r\n data = httptools.downloadpage(\"https://www.formula1.com/en/championship/drivers.html\").data\r\n if not data_driver:\r\n data_driver = httptools.downloadpage(\"http://www.formula1.com/sp/static/f1/2017/updates/data/drivers_all.js\").data\r\n nacido = find_single_match(data_driver, '%s.*?\"DOB\":\"([^\"]*)\"' % piloto)\r\n lugar = find_single_match(data_driver, '%s.*?\"POB\":\"([^\"]*)\"' % piloto)\r\n if lugar:\r\n nacido += \" , %s\" % lugar.replace(\"\\u00c9\", \"É\")\r\n \r\n piloto = piloto.rsplit(\".\", 1)[1]\r\n\r\n name = \"\"\r\n thumb = \"\"\r\n casco = \"\"\r\n equipo = \"\"\r\n matches = find_multiple_matches(data, '(.*?).*?(.*?)')\r\n for driver, team in matches:\r\n driver = decodeHtmlentities(driver)\r\n driver = quita_tildes(driver)\r\n if re.search(r'(?i)%s' % piloto, driver):\r\n name = re.sub(r'\\s{2,}', ' ', driver)\r\n equipo = team\r\n break\r\n if name:\r\n thumb = \"https://www.formula1.com/content/fom-website/en/championship/drivers/%s/_jcr_content/image.img.1920.medium.jpg\" % name.replace(\" \", \"-\").lower()\r\n casco = thumb.replace(\"/image.img\", \"/helmet.img\")\r\n\r\n return thumb, casco, data, name, equipo, nacido, data_driver\r\n\r\n\r\ndef get_tyres(num):\r\n folder = filetools.join(config.get_runtime_path(), 'resources', 'images', 'matchcenter')\r\n if num == 0:\r\n return ''\r\n elif num == 1:\r\n return filetools.join(folder, 'super.png')\r\n elif num == 2:\r\n return filetools.join(folder, 'soft.png')\r\n elif num == 3:\r\n return filetools.join(folder, 'medium.png')\r\n elif num == 4:\r\n return filetools.join(folder, 'hard.png')\r\n elif num == 5:\r\n return filetools.join(folder, 'inter.png')\r\n elif num == 6:\r\n return filetools.join(folder, 'wet.png')\r\n elif num == 7:\r\n return filetools.join(folder, 'ultra.png')\r\n \r\ndef quita_tildes(title):\r\n title = title.replace(\"Á\",\"A\")\r\n title = title.replace(\"É\",\"E\")\r\n title = 
title.replace(\"Í\",\"I\")\r\n title = title.replace(\"Ó\",\"O\")\r\n title = title.replace(\"Ú\",\"U\")\r\n title = title.replace(\"á\",\"a\")\r\n title = title.replace(\"é\",\"e\")\r\n title = title.replace(\"í\",\"i\")\r\n title = title.replace(\"ó\",\"o\")\r\n title = title.replace(\"ú\",\"u\")\r\n title = title.replace(\"À\",\"A\")\r\n title = title.replace(\"È\",\"E\")\r\n title = title.replace(\"Ì\",\"I\")\r\n title = title.replace(\"Ò\",\"O\")\r\n title = title.replace(\"Ù\",\"U\")\r\n title = title.replace(\"à\",\"a\")\r\n title = title.replace(\"è\",\"e\")\r\n title = title.replace(\"ì\",\"i\")\r\n title = title.replace(\"ò\",\"o\")\r\n title = title.replace(\"ù\",\"u\")\r\n title = title.replace(\"ç\",\"c\")\r\n title = title.replace(\"Ç\",\"C\")\r\n title = title.replace(\"Ñ\",\"ñ\")\r\n title = title.replace(\"ö\",\"o\")\r\n title = title.replace(\"ä\",\"a\")\r\n title = title.replace(\"ï\",\"i\")\r\n title = title.replace(\"ë\",\"e\")\r\n title = title.replace(\"ü\",\"u\")\r\n return title","repo_name":"gacj22/WizardGacj22","sub_path":"plugin.video.deportesalacarta/lib/matchcenter/marcadores.py","file_name":"marcadores.py","file_ext":"py","file_size_in_byte":57592,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"21916546640","text":"#!/usr/bin/env python\n\"\"\"\nWith RSA, the size of the plaintext has basically no influence on the encryption or decryption time.\nTiming attacks cannot be mounted based on a plaintext size\n\n$ python -m timeit -s \"import rsa\" -- \"rsa.encrypt(False)\" 7:09\n1000 loops, best of 5: 251 usec per loop\n\n$ python -m timeit -s \"import rsa\" -- \"rsa.encrypt(True)\" 7:14\n1000 loops, best of 5: 217 usec per loop\n\"\"\"\nimport optparse\nimport sys\n\nimport cryptography.hazmat.primitives.asymmetric.rsa as rsa\n\n# Computed as global variables so that they don't have an impact on the time computation of the encryption/decryption\n# algorithm\n\ne = 65537\npkey = rsa.generate_private_key(e, 4096)\nn = pkey.public_key().public_numbers().n\nd = pkey.private_numbers().d\n\n\ndef process_params():\n parser = optparse.OptionParser()\n parser.add_option(\n \"-s\",\n \"--short\",\n dest=\"short\",\n action=\"store_true\",\n default=False,\n help=\"Whether to run the algorithm with a short number plaintext\",\n )\n parser.add_option(\n \"-l\",\n \"--long\",\n dest=\"long\",\n action=\"store_true\",\n default=False,\n help=\n \"Whether to run the algorithm with a long number plaintext (like n-2)\",\n )\n parser.add_option(\n \"-p\",\n \"--print\",\n dest=\"print\",\n action=\"store_true\",\n default=False,\n help=\"Whether we should print the result to stdout\",\n )\n parser.set_usage(help_message())\n return parser.parse_args()\n\n\ndef display_help():\n print(help_message())\n\n\ndef help_message():\n return (\n \"This is a sample python app to compute noob level RSA encryption and decryption\"\n )\n\n\ndef handle_opts(opts, args):\n if opts.short and opts.long:\n print(f\"You have to chose either short or long\")\n sys.exit(101)\n res = encrypt(opts.long)\n if opts.print:\n print(res)\n\n\ndef encrypt(long=False):\n m = 1023\n if long:\n m = n - 2\n return pow(m, e, n)\n\n\ndef main():\n opts, args = process_params()\n handle_opts(opts, args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"GuyInTheShell/Crypto","sub_path":"RSA/1_timing/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40450025247","text":"# Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.\n\n# 1. Use zip to transform the matrix. \n# The meaning of return a and b is that when a is empty, the function jumps out. When a is not empty, b is executed. I am confused about this.\n# Time: \n# T(m, n) = O(n) + O((n-1)*m) + T(n-1, m); \n# O(n) to extend and O((n-1)*m) to rotate.\n# O(n) + O((n-1)*m) is around O(mn)\n# T(m, n) = O(mn)+T(n-1,m) = O(2mn)+T(n-1, m-1)\n# T(m, n) = O(min(m,n)*mn)\n\n# 2. Normal way.\n\n\nclass Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n return matrix and list(matrix.pop(0)) + self.spiralOrder(list(zip(*matrix))[::-1])\n\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n if not matrix or not matrix[0]:\n return []\n ans = []\n m, n = len(matrix), len(matrix[0])\n u, d, l, r = 0, m - 1, 0, n - 1\n while l < r and u < d:\n ans.extend([matrix[u][j] for j in range(l, r)])\n ans.extend([matrix[j][r] for j in range(u, d)])\n ans.extend([matrix[d][j] for j in range(r, l, -1)])\n ans.extend([matrix[j][l] for j in range(d, u, -1)])\n u, d, l, r = u+1, d-1, l+1, r-1\n if l == r:\n ans.extend([matrix[j][r] for j in range(u, d+1)])\n elif u == d:\n ans.extend([matrix[u][j] for j in range(l, r+1)])\n return ans","repo_name":"jwu424/Leetcode","sub_path":"SpiralMatrix.py","file_name":"SpiralMatrix.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14678386101","text":"from flask import Flask, render_template, request, redirect, session, url_for\nimport pandas as pd\nimport os\nfrom tinydb import TinyDB, Query\nimport random\nimport string\n\n\napp = Flask(__name__)\napp.secret_key = \"fffff\"\n\n@app.route(\"/admin\", methods=['GET','POST'])\ndef admin():\n if request.method == \"GET\":\n return render_template(\"admin.html\")\n elif request.method == \"POST\":\n task = request.form['task']\n question = request.form['question']\n options = request.form['options']\n annotators = request.form['annotators']\n global db\n db = TinyDB('db.json')\n admin = db.table('admin')\n taskid = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))\n f = request.files['file']\n filestring = (str(taskid) + str(f.filename))\n f.save(filestring)\n admin.insert({'taskid':taskid,'task':task,'question':question,'options':options,'filestring':filestring})\n users = db.table('users')\n annotators = annotators.split('|')\n user_array = []\n for annotator in annotators:\n token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))\n user_array.append([annotator,token])\n users.insert({'user':annotator,'token':token,'progress':0,'task':task,'taskid':taskid,'filestring':filestring})\n return render_template('users.html',users=user_array)\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT',8080))\n app.run(host='0.0.0.0', port=port, threaded = True)\n","repo_name":"nilansaha/Crowd","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"26153466287","text":"# this is not recursion\n\n\ndef isPalindrome(n):\n\n # Find the appropriate divisor\n # to extract the leading digit\n divisor = 1\n while (n / divisor >= 10):\n divisor *= 10\n\n while (n != 0):\n\n leading = n // divisor #taking out the first number\n trailing = n % 10 #taking out the last number\n\n # If first and last digit\n # not same return false\n if (leading != trailing):\n return False\n\n # Removing the leading and\n # trailing digit from number\n n = (n % divisor)//10\n\n # Reducing divisor by a factor\n # of 2 as 2 digits are dropped\n divisor = divisor/100\n\n return True\n\n\n# Driver code\nn = int(input())\nif(isPalindrome(n)):\n print('Yes, it is palindrome')\nelse:\n print('No, not palindrome')\n\n# This code is contributed by Danish Raza\n","repo_name":"rudranil723/DSA_rudy","sub_path":"for_notes/recursion/palindrom without using third variable.py","file_name":"palindrom without using third variable.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71373123247","text":"#\n# @lc app=leetcode.cn id=824 lang=python3\n#\n# [824] 山羊拉丁文\n#\n# @lc code=start\nMETA_LETTER = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n\n\nclass Solution:\n\n def toGoatLatin(self, sentence: str) -> str:\n return \" \".join(\n (\n word\n if word[0] in META_LETTER else\n (word[1:] + word[0])\n ) + \"ma\" + \"a\" * i\n for i, word in enumerate(sentence.split(\" \"), 1)\n )\n\n\n# print(Solution().toGoatLatin(\"I speak Goat Latin\"))\n# print(Solution().toGoatLatin(\"The quick brown fox jumped over the lazy dog\"))\n# @lc code=end\n","repo_name":"dreamhunter2333/leetcode_practise","sub_path":"leetcode/824.山羊拉丁文.py","file_name":"824.山羊拉丁文.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"}
+{"seq_id":"32698031666","text":"from Pants import Paints\nclass SalesPerson:\n \"\"\"The SalesPerson class represents an employee in the store\n\n \"\"\"\n \"\"\"\n \"\"\"\n def __init__(self, first_name, last_name, employee_id, salary):\n self.first_name = first_name\n self.last_name = last_name\n self.employee_id = employee_id\n self.salary = salary\n self.pants_sold = []\n self.total_sales = 0\n\n def sell_pants(self, paint):\n self.pants_sold.append(paint)\n self.total_sales += 1\n\n def display_sales(self):\n for item in self.pants_sold:\n print(\"color:{}, waist_size:{}\",item.color, item.length)\n\n def calculate_sales(self):\n total = 0\n for item in self.pants_sold:\n total += item.price\n return total\n\n def calculate_commission(self, percentage):\n sales_total = self.calculate_sales()\n return sales_total * percentage \n\ndef check_results():\n pants_one = Paints('red', 35, 36, 15.12)\n pants_two = Paints('blue', 40, 38, 24.12)\n pants_three = Paints('tan', 28, 30, 8.12)\n \n salesperson = SalesPerson('Amy', 'Gonzalez', 2581923, 40000)\n \n assert salesperson.first_name == 'Amy'\n assert salesperson.last_name == 'Gonzalez'\n assert salesperson.employee_id == 2581923\n assert salesperson.salary == 40000\n assert salesperson.pants_sold == []\n assert salesperson.total_sales == 0\n \n salesperson.sell_pants(pants_one)\n salesperson.pants_sold[0] == pants_one.color\n \n salesperson.sell_pants(pants_two)\n salesperson.sell_pants(pants_three)\n assert len(salesperson.pants_sold) == 3\n assert round(salesperson.calculate_sales(),2) == 47.36\n assert round(salesperson.calculate_commission(.1),2) == 4.74\n \n print('Great job, you made it to the end of the code checks!')\n \ncheck_results()","repo_name":"wangweijun123/python_all","sub_path":"workspace/SalesPerson.py","file_name":"SalesPerson.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"74489547566","text":"import torch\nfrom .base_model import BaseModel\nfrom . import networks\nimport numpy as np\nimport os\n\n\nclass AAUNetModel(BaseModel):\n def name(self):\n return 'AAUNetModel'\n\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n # changing the default values\n if is_train:\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n return parser\n\n def initialize(self, opt, dataset):\n BaseModel.initialize(self, opt)\n self.isTrain = opt.isTrain\n self.input = opt.input\n # specify the training losses you want to print out. The program will call base_model.get_current_losses\n self.loss_names = ['segmentation']\n # specify the images you want to save/display. The program will call base_model.get_current_visuals\n self.visual_names = ['rgb_image', 'tdisp_image', 'label', 'output']\n # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks\n self.model_names = ['AAUNet']\n\n # load/define networks\n self.netAAUNet = networks.define_AAUNet(dataset.num_labels, init_type=opt.init_type, init_gain= opt.init_gain, gpu_ids= self.gpu_ids)\n # define loss functions\n self.criterionSegmentation = networks.SegmantationLoss(class_weights=None).to(self.device)\n\n if self.isTrain:\n # initialize optimizers\n self.optimizers = []\n self.optimizer_AAUNet = torch.optim.SGD(self.netAAUNet.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)\n #self.optimizer_AAUNet = torch.optim.Adam(self.netAAUNet.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)\n self.optimizers.append(self.optimizer_AAUNet)\n self.set_requires_grad(self.netAAUNet, True)\n\n def set_input(self, input):\n self.rgb_image = input['rgb_image'].to(self.device)\n self.tdisp_image = input['tdisp_image'].to(self.device)\n self.label = input['label'].to(self.device)\n self.image_names = input['path']\n self.image_oriSize = input['oriSize']\n\n def forward(self):\n if self.input == 'rgb':\n self.output = self.netAAUNet(self.rgb_image)\n else:\n self.output = self.netAAUNet(self.tdisp_image)\n\n def get_loss(self):\n self.loss_segmentation = self.criterionSegmentation(self.output, self.label)\n\n def backward(self):\n self.loss_segmentation.backward()\n\n def optimize_parameters(self):\n self.forward()\n self.optimizer_AAUNet.zero_grad()\n self.get_loss()\n self.backward()\n self.optimizer_AAUNet.step()\n","repo_name":"hlwang1124/AAFramework","sub_path":"models/aaunet_model.py","file_name":"aaunet_model.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"2"}
+{"seq_id":"40130152055","text":"import time\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom transformers import BertTokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom config import MAX_LEN\r\n\r\n\r\ndef preprocessing(df):\r\n sentences = df.sentence.values\r\n labels = np.array([l for l in df.label.values]) #np.array([labels_encoding[l] for l in df.label.values])\r\n\r\n tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=True)\r\n \r\n encoded_sentences = []\r\n for sent in sentences:\r\n encoded_sent = tokenizer.encode(\r\n sent,\r\n add_special_tokens = True,\r\n truncation=True,\r\n max_length = MAX_LEN\r\n )\r\n \r\n encoded_sentences.append(encoded_sent)\r\n encoded_sentences = pad_sequences(encoded_sentences, maxlen=MAX_LEN, dtype=\"long\", \r\n value=0, truncating=\"post\", padding=\"post\")\r\n return encoded_sentences, labels\r\n \r\ndef attention_masks(encoded_sentences):\r\n # attention masks, 0 for padding, 1 for actual token\r\n attention_masks = []\r\n for sent in encoded_sentences:\r\n att_mask = [int(token_id > 0) for token_id in sent]\r\n attention_masks.append(att_mask)\r\n return attention_masks\r\n\r\ndef compute_accuracy(preds, labels):\r\n p = np.argmax(preds, axis=1).flatten()\r\n l = labels.flatten()\r\n return np.sum(p==l)/len(l)","repo_name":"ArkaJU/Multilingual-BERT","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"38580217010","text":"import numpy\n\n# Flux conserving linear interpolation.\n\ndef finterp(xout, xin, yin, e_yin=None, left=None, right=None):\n nin = len(xin)\n nout = len(xout)\n\n # Wavelength interval of each pixel.\n # delta_x = x_i+1 - x_i-1\n # do by shifting input array.\n # Extend to ends (where we have no information).\n dxin = numpy.empty_like(xin)\n\n dxin[1:nin-1] = 0.5*(xin[2:nin] - xin[0:nin-2])\n\n dxin[0] = dxin[1]\n dxin[nin-1] = dxin[nin-2]\n\n dxout = numpy.empty_like(xout)\n\n dxout[1:nout-1] = 0.5*(xout[2:nout] - xout[0:nout-2])\n\n dxout[0] = dxout[1]\n dxout[nout-1] = dxout[nout-2]\n\n # Interpolate in flux density, and then scale back to flux.\n yout = dxout * numpy.interp(xout, xin, yin/dxin, left, right)\n\n if e_yin is not None:\n tmp = numpy.interp(xout, xin, (e_yin/dxin)**2, left, right)\n tmp[tmp < 0] = 0\n e_yout = dxout * numpy.sqrt(tmp)\n return yout, e_yout\n else:\n return yout\n","repo_name":"mdwarfgeek/tres-tools","sub_path":"finterp.py","file_name":"finterp.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"26220748474","text":"class Solution:\n def removeOuterParentheses(self, s: str) -> str:\n out=0\n res=[]\n for i in s:\n if i==')':\n out-=1\n if out>0:\n res.append(i)\n if i=='(':\n out+=1\n return \"\".join(res)\n","repo_name":"suchibratarout/prat_leet","sub_path":"1078-remove-outermost-parentheses/1078-remove-outermost-parentheses.py","file_name":"1078-remove-outermost-parentheses.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"13152238809","text":"from collections import defaultdict\n\nsmall_caves = set()\nlinks = defaultdict(list)\nwith open('input.txt') as f:\n for line in f:\n left, right = line.strip().split('-')\n\n links[left].append(right)\n links[right].append(left)\n\n if left.islower():\n small_caves.add(left)\n if right.islower():\n small_caves.add(right)\n\npaths = [['start']]\ncomplete_paths = []\nwhile len(paths) > 0:\n path = paths.pop(0)\n current_cave = path[-1]\n\n # If we're in the end cave, add this to the complete paths and skip to the next path\n if current_cave == 'end':\n complete_paths.append(path)\n continue\n\n options = links[current_cave]\n for option in options:\n # If this option is a small cave and we've already been in it, skip to the next option\n if option in path and option in small_caves:\n continue\n\n paths.append(path + [option])\n\nprint(len(complete_paths))\n","repo_name":"simonbrahan/aoc2021","sub_path":"12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21362982304","text":"from tkinter import *\nimport time\nwindow=Tk()\nwindow.title(\"homa\")\nlabel=Label(window,text=\"Hey!!!\",fg='green',height=50,width=50,bg='white',font=('Times New Roman',80))\nlabel.pack()\ntime.sleep(3.0)\nlabel2=Label(window,text=\"Please type or speak...\",fg='green',height=50,width=50,bg='white',font=('Times New Roman',50),anchor=SW)\nlabel2.pack()\nwindow.mainloop()\n","repo_name":"mshayan63/sound-supporter","sub_path":"graphic.py","file_name":"graphic.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"17371756671","text":"import base64\nimport json\nfrom io import BytesIO, StringIO\n\nimport boto3\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nprint('Loading function')\n\n\nclass csv2png:\n def __init__(self):\n self.csv_data = None\n self.image = None\n\n def read_data(self, body):\n self.csv_data = pd.read_csv(StringIO(body), header=None)\n\n def draw_graph(self):\n buf = BytesIO()\n Column_name = self.csv_data.columns.values\n for i in range(1, len(self.csv_data.columns)):\n plt.plot(self.csv_data[Column_name[0]],\n self.csv_data[Column_name[i]],\n label=Column_name[i])\n plt.savefig(buf, format='png')\n buf.seek(0)\n self.image = buf.read()\n\n def response(self):\n return base64.b64encode(bytes(self.image)).decode('utf-8')\n\n\ndef lambda_handler(event, context):\n\n # check post\n try:\n request_body = event['body']\n except AttributeError:\n return {\n \"statusCode\":\n 500,\n \"body\":\n json.dumps({\n \"message\": \"'dict' object has no attribute 'body'\",\n }),\n }\n\n # constacter\n cp = csv2png()\n # read csv format data\n cp.read_data(body=request_body)\n # draw graph\n cp.draw_graph()\n\n return {\n \"statusCode\": 200,\n \"body\": cp.response(),\n \"headers\": {\n 'Content-Type': 'image/png'\n },\n \"isBase64Encoded\": True\n }\n","repo_name":"yoshi65/csv2png","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"8635279665","text":"\"\"\"\n Phone number country and location tracing\n in Python with phonenumbers library\n\"\"\"\n# pip install phonenumbers\n\nimport phonenumbers\nfrom phonenumbers import geocoder, carrier\n\ndef trace_phone_number(phone_number):\n try:\n parsed_number = phonenumbers.parse(phone_number, None)\n country = geocoder.country_name_for_number(parsed_number, 'en')\n location = geocoder.description_for_number(parsed_number, 'en')\n subscriber_name = carrier.safe_display_name(parsed_number, 'en')\n \n return country, location, subscriber_name\n \n except phonenumbers.phonenumberutil.NumberParseException:\n return None, None\n\n# testing the code\nphone_number = \"+14155534673\"\ncountry, location, subscriber_name = trace_phone_number(phone_number)\n\nif country and location:\n print('Country:', country)\n print('Location:', location)\n print('Service provider:', subscriber_name)\nelse:\n print(\"Unable to trace the phone number.\")\n \nprint(dir(carrier))\n","repo_name":"epythonlab/scripts","sub_path":"trace_phone.py","file_name":"trace_phone.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"4834960559","text":"import os\nimport signal\nimport subprocess\nimport urllib.parse\nimport tempfile as temp\nimport zipfile\nimport time\nimport shutil\nimport tkinter as tk\nfrom pathlib import Path\nfrom threading import Thread\n\nimport customtkinter as ctk\nimport requests\n\n\nZIP = '.zip'\nURL_BASE = 'https://www.efscode.com.br/atualizacoes/'\nDOWNLOAD = 'downloads/'\nNOME_DO_SOFTWARE = 'gerador_de_questoes_didaxis'\n\n\nclass Atualizacao(ctk.CTk):\n def __init__(self):\n self.nome_do_software = NOME_DO_SOFTWARE\n self.url_encodada = self.gera_url_atualizacao()\n self.pasta_software = self.busca_arquivo_local()\n\n self.bak_path: str = ''\n self.tmp_new_version_path: str = ''\n self.__pid = os.getpid()\n\n self.inicia_widgets()\n self.after(1000, self.inicia_thread)\n\n def inicia_thread(self):\n t1 = Thread(target=self.inicia_atualizacao)\n t1.start()\n\n def inicia_atualizacao(self):\n self.verifica_conexao_com_internet()\n self.gera_bkp()\n\n def inicia_widgets(self):\n ctk.CTk.__init__(self)\n self.geometry('550x300')\n self.title('QuestGenUpdater')\n ctk.set_default_color_theme('green')\n self.protocol(self.protocol()[0], self.evento_de_fechamento_da_tela)\n\n self.var_info = tk.StringVar()\n\n self.informacao = ctk.CTkLabel(self, text='')\n self.informacao.pack(pady=(20, 30))\n\n self.barra_de_progresso = ctk.CTkProgressBar(\n self,\n # progress_color='green',\n mode='determinate',\n orientation='horizontal',\n )\n self.barra_de_progresso.pack(padx=2, ipadx=200)\n self.barra_de_progresso.set(0)\n\n self.andamento_de_progresso = ctk.CTkLabel(self, text='')\n self.andamento_de_progresso.pack(fill='x', pady=(10, 0))\n\n self.velocidade_de_download = ctk.CTkLabel(self, text='', anchor='e')\n self.velocidade_de_download.pack(padx=(0, 20), pady=(10, 0))\n\n def verifica_conexao_com_internet(self) -> bool:\n self.atualiza_informacao('Verificando conexão com servidor.')\n try:\n requests.get(URL_BASE, timeout=100)\n return True\n except requests.exceptions.ConnectTimeout or requests.exceptions.ConnectionError:\n self.atualiza_informacao('Não foi possível se conectar à internet.')\n self.after(2000, self.evento_de_fechamento_da_tela)\n\n def gera_url_atualizacao(self) -> str:\n url_versao = URL_BASE + DOWNLOAD + self.nome_do_software + '.zip'\n encoded_url_versao = urllib.parse.quote(url_versao, safe=':/')\n return encoded_url_versao\n\n def gera_bkp(self):\n self.bak_path = temp.mktemp(prefix=f'{self.nome_do_software}_bak_')\n self.atualiza_informacao('Criando backup.')\n try:\n shutil.make_archive(self.bak_path, 'zip', self.pasta_software)\n except Exception as e:\n self.atualiza_informacao(f'Falha\\n{e}\\nao criar backup.')\n # Lidar com o erro adequadamente\n self.bak_path += '.zip'\n self.atualiza_informacao('Backup criado com sucesso.')\n\n self.baixar_nova_versao()\n\n def atualiza_informacao(self, informacao: str):\n self.informacao.configure(text=informacao)\n self.update_idletasks()\n\n def baixar_nova_versao(self):\n self.atualiza_informacao('Baixando nova versão.')\n response = requests.get(self.url_encodada, stream=True, timeout=100, )\n tamanho_total = int(response.headers.get('content-length', 0))\n tamanho_atual = 0\n inicio = time.time()\n\n with temp.NamedTemporaryFile(\n prefix=f'{self.nome_do_software}_new_', suffix=ZIP, delete=False\n ) as tmp_new_version:\n self.andamento_de_progresso.configure(text=tmp_new_version.name)\n for dados in response.iter_content(chunk_size=10000):\n tmp_new_version.write(dados)\n\n tamanho_atual += len(dados)\n 
self.calcula_velocidade(tamanho_atual, inicio)\n\n percentual_concluido = self.calcula_progresso(tamanho_atual, tamanho_total) * 100\n self.andamento_de_progresso.configure(text=f'{tmp_new_version.name} - {percentual_concluido:.2f}%')\n self.atualiza_barra_de_progresso(tamanho_atual, tamanho_total)\n\n self.atualiza_informacao('Testando download.')\n self.tmp_new_version_path = Path(str(tmp_new_version.name)).resolve()\n if zipfile.is_zipfile(self.tmp_new_version_path):\n self.atualiza_informacao('Download OK...')\n self.after(1000, lambda: self.atualiza_informacao('Iniciando atualização'))\n self.after(2000, self.atualiza)\n else:\n self.after(1000, lambda: self.atualiza_informacao('Falha no download'))\n self.after(2000, self.evento_de_fechamento_da_tela)\n\n def atualiza(self):\n with zipfile.ZipFile(self.tmp_new_version_path, 'r') as zip_ref:\n total_arquivos = len(zip_ref.namelist())\n self.andamento_de_progresso.configure(text=f'Extraindo arquivos... 0/{total_arquivos}')\n\n for index, arquivo in enumerate(zip_ref.namelist(), start=1):\n self.andamento_de_progresso.configure(text=f'Extraindo arquivos... {index}/{total_arquivos}')\n zip_ref.extract(arquivo, self.pasta_software)\n self.atualiza_barra_de_progresso(index, total_arquivos)\n self.andamento_de_progresso.configure(text='')\n self.atualiza_informacao(f'Iniciano: {self.nome_do_software}.exe')\n self.after(500, self.inicia_programa)\n\n def busca_arquivo_local(self):\n local = Path(__file__).resolve().parent\n if str(local) == r'C:\\Users\\Edimar\\Documents\\GitHub\\gerador_de_questoes_didaxis':\n return local / r'compilado\\dist'\n return local.parent / self.nome_do_software\n\n def atualiza_barra_de_progresso(self, valor_atual, valor_total):\n self.barra_de_progresso.set(self.calcula_progresso(valor_atual, valor_total))\n self.update_idletasks()\n\n @staticmethod\n def calcula_progresso(valor_atual, valor_total) -> float:\n try:\n progresso = valor_atual / valor_total\n return progresso\n except ZeroDivisionError:\n return 0\n\n def inicia_programa(self):\n comando = ['start', self.pasta_software / self.nome_do_software / f'{self.nome_do_software}.exe']\n try:\n subprocess.Popen(comando, shell=True)\n except Exception as e:\n print(e)\n\n self.andamento_de_progresso.configure(text='Tudo certo')\n self.after(1000, self.evento_de_fechamento_da_tela)\n\n def evento_de_fechamento_da_tela(self):\n try:\n if self.bak_path:\n os.remove(self.bak_path)\n\n if self.tmp_new_version_path:\n os.remove(self.tmp_new_version_path)\n except FileNotFoundError:\n pass\n\n os.kill(self.__pid, signal.SIGTERM)\n\n def calcula_velocidade(self, tamanho_atual, inicio):\n tempo_decorrido = time.time() - inicio\n velocidade = tamanho_atual / tempo_decorrido\n if velocidade < 1024:\n self.velocidade_de_download.configure(text=f\"Velocidade: {velocidade:.2f} bytes/s\")\n elif velocidade < 1024 * 1024:\n self.velocidade_de_download.configure(text=f\"Velocidade: {velocidade / 1024:.2f} KB/s\")\n else:\n self.velocidade_de_download.configure(text=f\"Velocidade: {velocidade / (1024 * 1024):.2f} MB/s\")\n\n\nif __name__ == '__main__':\n app = Atualizacao()\n app.mainloop()\n","repo_name":"EdimarDeSa/gerador_de_questoes_didaxis","sub_path":"QuestGenUpdater.py","file_name":"QuestGenUpdater.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"26942112236","text":"import streamlit as st\nimport altair as alt\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.metrics import confusion_matrix,accuracy_score, f1_score, recall_score, precision_score, roc_curve, roc_auc_score, auc\n\npd.options.mode.chained_assignment = None\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef print_hi():\n global x,y, knn, pred2, acc2, feature_score, x_k, f_s1, drop_list1,x_train, x_test, y_train, y_test\n df = pd.read_csv('input.csv')\n st.markdown(\"Прогнозирование послеоперационных осложнений\", unsafe_allow_html=True)\n\n # load model\n x = df.iloc[:, 0:15]\n y = df.iloc[:, -1]\n # select\n feature_score, x_k, f_s1, drop_list1 = k_best(x, y)\n\n # обучение на сокращенном наборе k-best\n x_train, x_test, y_train, y_test = scal(x_k, y, size=0.3)\n\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred('knn', x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n option = st.sidebar.selectbox('Mode', ['Загрузка выборки', 'Обучение', 'Тестирование'])\n\n if option == \"Загрузка выборки\":\n\n st.dataframe(df.head())\n st.sidebar.subheader(' Исследование')\n st.markdown(\"Установите флажок на боковой панели, чтобы просмотреть набор данных.\")\n if st.sidebar.checkbox('Основная информация'):\n\n if st.sidebar.checkbox(\"Показать столбцы\"):\n st.subheader('Список столбцов')\n all_columns = df.columns.to_list()\n st.write(all_columns)\n\n if st.sidebar.checkbox('Статистика'):\n st.subheader('Описание статистических данных')\n st.write(df.describe())\n if st.sidebar.checkbox('Пропуски?'):\n st.subheader('Наличие пропусков')\n st.write(df.isnull().sum())\n\n if st.sidebar.checkbox('Информативные показатели'):\n st.markdown('#### Используя метод фльтрации Хи2 для отбра показателей мы получили следующий результат.\\n #### Наиболее информативными показателями являются:')\n st.write(drop_list1)\n\n elif option == 'Обучение':\n st.sidebar.subheader(' Исследование')\n st.set_option('deprecation.showPyplotGlobalUse', False)\n\n if st.sidebar.checkbox('Показать набор для обучения'):\n st.write(x_k.head(50))\n st.write('Размер выборки: ', x_k.shape)\n st.write('Статистика: \\n', x_k.describe())\n\n size = st.sidebar.slider('Установите размер тестовой выборки', min_value=0.2, max_value=0.4)\n # обучение на сокращенном наборе k-best\n x_train, x_test, y_train, y_test = scal(x_k, y,size)\n if st.sidebar.checkbox('Вывод обучающих и тестовых наборов'):\n st.write('X_train: ', x_train.shape)\n st.write('y_train: ', y_train.shape)\n st.write('X_test: ', x_test.shape)\n st.write('y_test: ', y_test.shape)\n\n model = st.sidebar.selectbox('Mode', [' ','Логистическая регрессия', 'К-ближайший соседей', 'Дерево решений', 'Метод опорных векторов', 'Многослойный персептрон', 'Случайный лес'])\n if model == 'Логистическая регрессия':\n vid = 'log'\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred(vid, x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n t = round(acc2, 3) * 100\n st.success(\"Точность модели {0}: 
{1:9.2f}\".format(model,t))\n\n if st.sidebar.checkbox('Показать матрицу неточности'):\n # матрица неточности\n cnf_matrix = confusion_matrix(y_test, pred2)\n\n fig, ax = plt.subplots()\n ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title('Матрица неточности \\n', y=1.1)\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"Reds\", fmt='g')\n st.write(fig)\n\n if model == 'К-ближайший соседей':\n vid = 'knn'\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred(vid, x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n t = round(acc2, 3) * 100\n st.success(\"Точность модели {0}: {1:9.2f}\".format(model,t))\n if st.sidebar.checkbox('Показать матрицу неточности'):\n # матрица неточности\n cnf_matrix = confusion_matrix(y_test, pred2)\n\n fig, ax = plt.subplots()\n ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title('Матрица неточности \\n', y=1.1)\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"Reds\", fmt='g')\n st.write(fig)\n if model == 'Дерево решений':\n vid = 'tree'\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred(vid, x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n t = round(acc2, 3) * 100\n st.success(\"Точность модели {0}: {1:9.2f}\".format(model,t))\n if st.sidebar.checkbox('Показать матрицу неточности'):\n # матрица неточности\n cnf_matrix = confusion_matrix(y_test, pred2)\n\n fig, ax = plt.subplots()\n ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title('Матрица неточности \\n', y=1.1)\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"Reds\", fmt='g')\n st.write(fig)\n if model == 'Метод опорных векторов':\n vid = 'svm'\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred(vid, x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n t = round(acc2, 3) * 100\n st.success(\"Точность модели {0}: {1:9.2f}\".format(model,t))\n if st.sidebar.checkbox('Показать матрицу неточности'):\n # матрица неточности\n cnf_matrix = confusion_matrix(y_test, pred2)\n\n fig, ax = plt.subplots()\n ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title('Матрица неточности \\n', y=1.1)\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"Reds\", fmt='g')\n st.write(fig)\n if model == 'Многослойный персептрон':\n vid = 'mlp'\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred(vid, x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n t = round(acc2, 3) * 100\n st.success(\"Точность модели {0}: {1:9.2f}\".format(model,t))\n if st.sidebar.checkbox('Показать матрицу неточности'):\n # матрица неточности\n cnf_matrix = confusion_matrix(y_test, pred2)\n\n fig, ax = plt.subplots()\n ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title('Матрица неточности \\n', y=1.1)\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"Reds\", fmt='g')\n st.write(fig)\n if model == 'Случайный лес':\n vid = 'for'\n # обучение на сокращенном наборе k-best\n knn, pred2 = pred(vid, x_train, x_test, y_train)\n acc2 = accuracy_model(y_test, pred2)\n t = round(acc2, 3) * 100\n if t > 50:\n custom_emoji = ':blush:'\n st.info('{}'.format(custom_emoji))\n st.success(\"Точность модели {0}: {1:9.2f}\".format(model,t))\n else:\n custom_emoji = ':confused:'\n st.info('{}'.format(custom_emoji))\n st.success(\"Точность модели {0}: {1:9.2f}\".format(model,t))\n if st.sidebar.checkbox('Показать матрицу неточности'):\n # матрица неточности\n cnf_matrix = confusion_matrix(y_test, pred2)\n\n fig, ax = plt.subplots()\n 
ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title('Матрица неточности \\n', y=1.1)\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"Reds\", fmt='g')\n st.write(fig)\n else:\n st.success(\"Вы не выбрали модель\")\n\n\n elif option == \"Тестирование\":\n # Print shape and description of the data\n st.set_option('deprecation.showPyplotGlobalUse', False)\n if st.sidebar.checkbox('Классифицировать пациента?'):\n bilirubin = st.number_input(\"Билирубин\")\n neutrophils = st.number_input(\"Нейрофилы\")\n amylase = st.number_input(\"Амилазе\")\n duration = st.number_input(\"Длительность операции\")\n lymphocytes = st.number_input(\"Лимфоциты\")\n if st.button(\"Прогноз\"):\n # тестирование\n pred9 = knn.predict([[bilirubin, neutrophils, amylase, duration, lymphocytes]])\n #st.success(pred9.round(1)[0])\n if pred9 ==0:\n custom_emoji = ':blush:'\n st.info('{}'.format(custom_emoji))\n st.success(\"Осложнений нет\")\n else:\n custom_emoji = ':confused:'\n st.info('{}'.format(custom_emoji))\n st.success(\"Осложнения есть\")\n\n\ndef k_best(x,y):\n bestfeature=SelectKBest(score_func=chi2,k='all')\n fit=bestfeature.fit(x,y)\n df_score=pd.DataFrame(fit.scores_)\n df_column=pd.DataFrame(x.columns)\n feature_score=pd.concat([df_column,df_score],axis=1)\n feature_score.columns=['Specs', 'Score']\n feature_score=feature_score.sort_values(by='Score', ascending=False)\n print('Значимость показателей')\n print(feature_score)\n# выбираем показатели по важности\n f_s1=feature_score[feature_score['Score']>20]\n drop_list1 = f_s1['Specs']\n #drop_list1.head(7)\n#feature_score\n x_1=x.loc[:,drop_list1]#3\n print('Используя метод фльтрации Хи2 для отбра показателей мы получили следующий результат. Наиболее информативными показателями являются:')\n print(drop_list1)\n print('Создадим новую выборку состоящую толко из этих показателей.')\n print(x_1)\n return feature_score,x_1,f_s1,drop_list1\n\ndef scal(x,y,size):\n#шкалируем весь набор данных\n scaler = MinMaxScaler(feature_range=(0,1))\n#назначение показателейдля шклирования\n x_scaled = pd.DataFrame(scaler.fit_transform(x), columns=x.columns)\n# обучающая и тестовая выборки по полным данным\n# деление на обучающую и тестовую выборки: 80 % - 20 %\n x_train, x_test, y_train, y_test = train_test_split(x_scaled, y, test_size = size)\n return x_train, x_test, y_train, y_test\n\n# определяем функцию для оценки модели\ndef accuracy_model(y_test, model_pred):\n# доля правильных ответов алгоритма: Точность = (истинное положительное + истинно отрицательное значение) / всего\n acc = accuracy_score(y_test, model_pred)\n print(f\"The accuracy score for method is: {round(acc,3)*100}%\")\n return acc\n\ndef pred(vid, x_train, x_test,y_train):\n if vid == 'log':\n model = LogisticRegression().fit(x_train, y_train)\n pred = model.predict(x_test)\n elif vid == 'knn':\n model = KNeighborsClassifier(n_neighbors = 2).fit(x_train, y_train)\n pred = model.predict(x_test)\n elif vid == 'tree':\n model = DecisionTreeClassifier(criterion='entropy', max_leaf_nodes=6, random_state=0).fit(x_train, y_train)\n pred = model.predict(x_test)\n elif vid == 'svm':\n model = SVC().fit(x_train, y_train)\n pred = model.predict(x_test)\n elif vid == 'mlp':\n model = MLPClassifier(activation='relu', solver='lbfgs', alpha=1e-5, max_iter=1000, hidden_layer_sizes=(5, ), random_state=1).fit(x_train, y_train)\n pred = model.predict(x_test)\n elif vid == 'for':\n model = RandomForestClassifier(criterion='entropy',max_depth=2, random_state=1).fit(x_train, y_train)\n pred = 
model.predict(x_test)\n else:\n print(\"Такого метода нет\")\n return model, pred\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print_hi()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"stuniy/pythonProject10","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14822,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
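The feature filter in k_best above keeps columns whose chi-squared score exceeds 20. A minimal standalone sketch of the same step on synthetic data (the threshold and k='all' follow the record; chi2 requires non-negative features, hence the shift):

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, chi2

X, y = make_classification(n_samples=200, n_features=8, random_state=0)
X = pd.DataFrame(X - X.min())  # shift so every value is non-negative
fit = SelectKBest(score_func=chi2, k='all').fit(X, y)
scores = pd.Series(fit.scores_, index=X.columns).sort_values(ascending=False)
print(scores[scores > 20].index.tolist())  # columns that would be kept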
+{"seq_id":"5465383757","text":"from pmdarima.arima import auto_arima\r\nimport warnings\r\nimport pandas as pd\r\nimport datetime\r\n\r\n\r\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\r\nfrom .. import averageError as ae\r\nfrom .. import dataGeter\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',\r\n FutureWarning)\r\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',\r\n FutureWarning)\r\n\r\n\r\ndef PredictFromList(measures_list, pre_len):\r\n df = pd.DataFrame(measures_list)\r\n df.columns = ['date', 'height']\r\n\r\n\r\n\r\n\r\n \r\n pred = PredictFromDF(df, pre_len)\r\n\r\n pred = pred.values.tolist()\r\n res = []\r\n\r\n for i in range(0, len(pred)):\r\n res.append((pred[i][0], pred[i][1]))\r\n\r\n return res\r\n\r\n\r\ndef PredictFromDF(df, pre_len):\r\n\r\n model = auto_arima(df['height'], start_p=1, d=1, start_q=1,\r\n max_p=3, max_q=3, start_P=1,\r\n D=1, start_Q=1, max_P=3, max_D=3,\r\n max_Q=5, m=2, seasonal=True,\r\n error_action='ignore', trace=True,\r\n suppres_warnings=True, stepwise=True,\r\n random_state=20, n_fits=50)\r\n\r\n model.fit(df['height'])\r\n\r\n pred = model.predict(n_periods=pre_len)\r\n\r\n # prepare res (dataframe)\r\n\r\n # prepare date column\r\n start_date = df['date'].iloc[-1]\r\n delta = start_date - df['date'].iloc[-2]\r\n dates = []\r\n for i in range(0, pre_len):\r\n dates.append(start_date + (i+1) * delta)\r\n\r\n res = pd.DataFrame(dates)\r\n\r\n res['predValue'] = pred\r\n res.columns = ['date', 'predValue']\r\n return res\r\n\r\n# sensor_code is a string, pre_len is a int, startdate a datetie.datetime\r\n# Return a prediction from now of pre_len values from the sensor\r\n# if a date is spécified, it will return the prediction starting at the star_date\r\n\r\n\r\ndef PredictFromSensor(sensor_code, pre_len, pred_starting_date=0):\r\n if(pred_starting_date == 0):\r\n measures = dataGeter.GetLastMeasures(sensor_code)\r\n return PredictFromList(measures, pre_len)\r\n else:\r\n start_date = pred_starting_date - datetime.timedelta(days=5)\r\n measures = dataGeter.GetMeasures(start_date, 5, sensor_code)\r\n return PredictFromList(measures, pre_len)\r\n\r\n\r\n# return a list of metrics calculated\r\n# [rmse, mae, mse, mape, r2score]\r\n# pre_len is the size of prediction\r\n# data_size is the number of timeseries used to make the average\r\ndef ComputeError(pre_len, data_size):\r\n print(\"Error computing will take few minutes...\")\r\n data = ae.GetRandomTimeSeries(data_size)\r\n tsData, tsReal = ae.CutTimeSeries(data, 24, pre_len)\r\n\r\n tsPred = []\r\n for ts in tsData:\r\n tsPred.append(PredictFromList(ts, pre_len))\r\n\r\n return ae.ComputeMetrics(tsData, tsPred, tsReal)\r\n","repo_name":"WhoIsKeyserSoze/TimeSeriesGenerationForFloodControl","sub_path":"TimeSeriesPredictor/ARMA_module/autoarima.py","file_name":"autoarima.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40033774651","text":"from input import data, sample, sample2\n\nfrom collections import defaultdict\n\ndef parse(data):\n dct = {}\n for line in data.split('\\n'):\n reactants, product = line.split(' => ')\n needed = []\n for thing in reactants.split(', '):\n amt, name = thing.split()\n needed.append((int(amt), name))\n amt, name = product.split()\n amt = int(amt)\n dct[name] = [amt, needed]\n return dct\n\ndef sum(product, amt_want):\n if product == 'ORE':\n return amt_want\n amt_get, reqs = dct[product]\n from_before = leftovers[product]\n needed = max(amt_want - from_before, 0)\n leftovers[product] -= amt_want - needed\n if needed % amt_get == 0:\n make_n = needed // amt_get\n rem = amt_get*make_n - needed\n else:\n round_up = needed + amt_get - needed % amt_get\n make_n = round_up // amt_get\n rem = round_up - needed\n leftovers[product] += rem\n ores = 0\n for amt, name in reqs:\n ores += sum(name, amt * make_n)\n return ores\n\ndct = parse(data)\n\n#part1\nleftovers = defaultdict(int)\nprint(sum('FUEL', 1))\n\n#part2\nlow = 1\nhigh = 20000000\ntril = 1000000000000\nwhile high - low > 1:\n mid = (low + high) // 2\n leftovers = defaultdict(int)\n cost = sum('FUEL', mid)\n if cost <= tril:\n low = mid\n else:\n high = mid\nprint(low)\n","repo_name":"Skaft/aoc","sub_path":"2019/day14/aoc14.py","file_name":"aoc14.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"1174459675","text":"# Add site contacts to the database\n# Daryl Herzmann 5 Jul 2002\n# 15 Apr 2005\tOnly add if they don't exist.\n\nimport pg\nfrom pyIEM import stationTable\n\nmydb = pg.connect(\"portfolio\", \"meteor.geol.iastate.edu\", 5432)\nst = stationTable.stationTable(\"/mesonet/TABLES/awos.stns\")\n\nportfolio = \"iaawos\"\nname = \"Allen Sells\"\nemail = \"allen.sells@dot.iowa.gov\"\n\nfor station in st.ids:\n sql = (\n \"SELECT * from iem_site_contacts WHERE portfolio = '%s' and \\\n email = '%s' and s_mid = '%s'\"\n % (portfolio, email, station)\n )\n rs = mydb.query(sql).dictresult()\n\n if len(rs) == 0:\n sql = (\n \"INSERT into iem_site_contacts (portfolio, s_mid, \\\n name, phone, email) VALUES ('%s', '%s', \\\n '%s', ' ', '%s')\"\n % (portfolio, station, name, email)\n )\n print(\"Adding for %s\" % (station,))\n mydb.query(sql)\n","repo_name":"akrherz/DEV","sub_path":"jportfolio/makeContact.py","file_name":"makeContact.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"19299238595","text":"\ndef main():\n a = 17\n b = 21\n print(f\"main:\\t a = {a}, b = {b}\")\n swap(a,b)\n print(f\"main:\\t a = {a}, b = {b}\")\n\n def swap2(a, b):\n a, b = b, a\n print(f\"swap2: a = {a}, b = {b}\")\n\n swap2(a,b)\n print(f\"main:\\t a = {a}, b = {b}\")\n\n def swap3():\n a, b = b, a # this will give error\n # the function can only see global variables\n # not a local one that is declared in a higher scope\n print(f\"swap3: a = {a}, b = {b}\")\n\n swap3()\n print(f\"main:\\t a = {a}, b = {b}\")\n\n\n\ndef swap(a, b):\n a, b = b, a\n print(f\"swap:\\t a = {a}, b = {b}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"williamszk/c_cpp_study","sub_path":"c_study/book-effective-c-seacord/221125_01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"23981864257","text":"from year_2020.tools import open_file_line_by_line\n\n\ndef is_valid_part1(condition, pwd):\n \"\"\"\n Test is a password is valid followed the specified conditions.\n @param condition: \"min - max char\" Min and Max iteration of the char.\n @param pwd: The password to test.\n @return: A bool. True if the password is valid or False if not.\n \"\"\"\n mini, string = condition.split(\"-\")\n maxi = string.split(\" \")[0]\n char = condition[-1]\n result = False\n i = 0\n char_nb = 0\n while i < len(pwd):\n if pwd[i] == char:\n char_nb += 1\n i += 1\n if int(mini) <= char_nb <= int(maxi):\n result = True\n return result\n\n\ndef is_valid_part2(condition, pwd):\n \"\"\"\n Test is a password is valid followed the specified conditions.\n @param condition: \"pos1-pos2 char\" Possible position of the char, the char can only be in one position.\n @param pwd: The password to test.\n @return: A bool. True if the password is valid or False if not.\n \"\"\"\n pos1, string = condition.split(\"-\")\n pos2 = string.split(\" \")[0]\n char = condition[-1]\n result = False\n\n if pwd[int(pos1)] == char:\n result = True\n if pwd[int(pos2)] == char:\n if result:\n result = False\n else:\n result = True\n return result\n\n\ndef nb_of_valid_pwd(input_list, part):\n \"\"\"\n Get the number of valid password.\n @param input_list: each element are \"number-number char: string\"\n @param part: 1 or 2. The part of the day.\n @return: The number of valid password\n \"\"\"\n result = 0\n for line in input_list:\n element = line.split(\":\")\n if part == 1:\n if is_valid_part1(element[0], element[1]):\n result += 1\n else:\n if is_valid_part2(element[0], element[1]):\n result += 1\n return result\n\n\ndef main():\n print(\"Advent of code day 2\")\n input_list = open_file_line_by_line(\"./input.txt\")\n print(\"Part 1: \" + str(nb_of_valid_pwd(input_list, 1)))\n print(\"Part 2: \" + str(nb_of_valid_pwd(input_list, 2)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexandre-petitjean/my_advent_of_code","sub_path":"year_2020/day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"32783950360","text":"import RPi.GPIO as GPIO\nfrom getkey import getkey, keys\nimport time\n\n\nPIN = 18\nPWMA1 = 6 \nPWMA2 = 13\nPWMB1 = 20\nPWMB2 = 21\nD1 = 12\nD2 = 26\n\nPWM = 50\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(PIN,GPIO.IN,GPIO.PUD_UP)\nGPIO.setup(PWMA1,GPIO.OUT)\nGPIO.setup(PWMA2,GPIO.OUT)\nGPIO.setup(PWMB1,GPIO.OUT)\nGPIO.setup(PWMB2,GPIO.OUT)\nGPIO.setup(D1,GPIO.OUT)\nGPIO.setup(D2,GPIO.OUT)\np1 = GPIO.PWM(D1,500)\np2 = GPIO.PWM(D2,500)\np1.start(50)\np2.start(50)\n\ndef\tset_motor(A1,A2,B1,B2):\n\tGPIO.output(PWMA1,A1)\n\tGPIO.output(PWMA2,A2)\n\tGPIO.output(PWMB1,B1)\n\tGPIO.output(PWMB2,B2)\n\ndef forward():\n\tGPIO.output(PWMA1,1)\n\tGPIO.output(PWMA2,0)\n\tGPIO.output(PWMB1,1)\n\tGPIO.output(PWMB2,0)\n\ndef stop():\n\tset_motor(0,0,0,0)\n\ndef reverse():\n\tset_motor(0,1,0,1)\n\ndef left():\n\tset_motor(1,0,0,0)\n\ndef right():\n\tset_motor(0,0,1,0)\n\nprint('Motor Test Start ...')\n\ntry:\n\twhile True:\n\t\tkey = getkey()\n\t\tif(key != None):\n\t\t\tif key == keys.UP:\n\t\t\t\tforward()\n\t\t\t\tprint(\"forward\")\n\t\t\tif key == keys.LEFT:\n\t\t\t\tleft()\n\t\t\t\tprint(\"left\")\n\t\t\tif key == 's':\n\t\t\t\tstop()\n\t\t\t\tprint(\"stop\")\n\t\t\tif key == keys.RIGHT:\n\t\t\t\tright()\n\t\t\t\tprint(\"right\")\n\t\t\tif key == keys.DOWN:\n\t\t\t\treverse()\t\t\n\t\t\t\tprint(\"reverse\")\n\t\t\tif key == 'o':\n\t\t\t\tif(PWM + 10 < 101):\n\t\t\t\t\tPWM = PWM + 10\n\t\t\t\t\tp1.ChangeDutyCycle(PWM)\n\t\t\t\t\tp2.ChangeDutyCycle(PWM)\n\t\t\t\t\tprint(PWM)\n\t\t\tif key == 'l':\n\t\t\t\tif(PWM - 10 > -1):\n\t\t\t\t\tPWM = PWM - 10\n\t\t\t\t\tp1.ChangeDutyCycle(PWM)\n\t\t\t\t\tp2.ChangeDutyCycle(PWM)\n\t\t\t\t\tprint(PWM)\n\t\t\tif key == 'q':\n\t\t\t\tbreak\nexcept KeyboardInterrupt:\n\tGPIO.cleanup();\n\tprint(\"Keyboard Error\")\n\n\nGPIO.cleanup();","repo_name":"DanielZ321/PiRobotCode","sub_path":"motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"70953246127","text":"\n#LITMOD 4.0\n#gmt minmax substitute for interface\n#Eldar Baykiev, 2020\n\nimport numpy as np\nimport sys\n\nif len(sys.argv) != 2:\n print('ERROR')\n exit(-1)\n \nfilename = sys.argv[1]\nimport os\nif not (os.path.isfile(filename)):\n print('ERROR')\n exit(-1)\n \ntry:\n grd = np.loadtxt(filename)\nexcept:\n print('ERROR')\n exit(-1)\n \ncol_n = len(grd[0, :])\nfor i in range(len(grd[0, :])):\n print(str(np.min(grd[:, i])) + '\\t' + str(np.max(grd[:, i])), end='\\t')\n \nprint('\\n', end='')\n\n\n\n","repo_name":"eldarbaykiev/litmod","sub_path":"src_intf/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"2"}
+{"seq_id":"2708074400","text":"from conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom conans.errors import ConanInvalidConfiguration\nimport contextlib\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass XorgCfFilesConan(ConanFile):\n name = \"xorg-cf-files\"\n description = \"Imake configuration files & templates\"\n topics = (\"conan\", \"imake\", \"xorg\", \"template\", \"configuration\", \"obsolete\")\n license = \"MIT\"\n homepage = \"https://gitlab.freedesktop.org/xorg/util/cf\"\n url = \"https://github.com/conan-io/conan-center-index\"\n settings = \"os\", \"compiler\"\n\n exports_sources = \"patches/*\"\n generators = \"pkg_config\"\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def requirements(self):\n self.requires(\"xorg-macros/1.19.3\")\n self.requires(\"xorg-proto/2021.4\")\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n if self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"automake/1.16.3\")\n\n def configure(self):\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def validate(self):\n if tools.is_apple_os(self.settings.os):\n raise ConanInvalidConfiguration(\"This recipe does not support Apple operating systems.\")\n\n def package_id(self):\n del self.info.settings.compiler\n # self.info.settings.os # FIXME: can be removed once c3i is able to test multiple os'es from one common package\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n @property\n def _user_info_build(self):\n return getattr(self, \"user_info_build\", self.deps_user_info)\n\n @contextlib.contextmanager\n def _build_context(self):\n if self.settings.compiler == \"Visual Studio\":\n with tools.vcvars(self):\n env = {\n \"CC\": \"{} cl -nologo\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n \"CXX\": \"{} cl -nologo\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n \"CPP\": \"{} cl -E\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n }\n with tools.environment_append(env):\n yield\n else:\n yield\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=self._settings_build.os == \"Windows\")\n self._autotools.libs = []\n self._autotools.configure(configure_dir=self._source_subfolder)\n return self._autotools\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.libdirs = []\n\n self.user_info.CONFIG_PATH = os.path.join(self.package_folder, \"lib\", \"X11\", \"config\").replace(\"\\\\\", 
\"/\")\n","repo_name":"orgTestCodacy11KRepos110MB/repo-4943-conan-center-index","sub_path":"recipes/xorg-cf-files/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"29656454293","text":"\nfrom flask import Blueprint, render_template, request, redirect, url_for\n\nfrom flask_login import login_user, logout_user, current_user\n\nfrom app.config import db\n\nfrom app.models.user import User\nfrom app.models.product import Product\nfrom app.models.therapy import Therapy\nfrom app.models.order import Order\n\nfrom werkzeug.utils import secure_filename\n\nfrom app.config import UPLOAD_FOLDER\n\nimport os\n\n\n# Blueprint admin\nadmin = Blueprint('admin', __name__,\n url_prefix=\"/admin\",\n template_folder=\"../../templates\",\n static_folder=\"../../static\")\n\n\nALLOWED_EXT = ['jpeg', 'jpg', 'png']\n\n\n# URL homepage admin\n@admin.route('/', methods=['GET', 'POST'])\ndef index():\n if(request.method == 'GET'):\n return render_template('admin/index.html')\n if(request.method == 'POST'):\n email = request.form['email']\n password = request.form['password']\n func = User.query.filter_by(email=email).first()\n if(not func or not func.verify_password(password) or func.user_type != \"admin\"):\n return render_template('admin/index.html',\n error=True)\n else:\n login_user(func)\n return render_template('admin/index.html')\n\n\n# URL to logout Admin/funcionario\n@admin.route('/logout', methods=['GET'])\ndef logout():\n logout_user()\n return redirect('/admin')\n\n\n# URL Profile admin\n@admin.route('/profile', methods=['GET'])\ndef profile():\n if(request.method == 'GET'):\n return render_template('admin/profile.html')\n\n\n# UPDATE Admin\n\n# Password\n@admin.route('/profile/password/', methods=['POST'])\ndef change_password():\n if(request.method == 'POST'):\n pwd = request.form['old_password']\n new_pwd = request.form['new_password']\n user = current_user\n if user and user.verify_password(pwd):\n user.password = new_pwd\n db.session.commit()\n logout_user()\n return redirect(url_for('admin.index'))\n else:\n return render_template('admin/profile.html',\n error=True)\n\n\n# Data\n@admin.route('/profile/data', methods=['POST'])\ndef change_data():\n if(request.method == 'POST'):\n user = current_user\n if(user):\n email = request.form['email']\n cep = request.form['cep']\n number = request.form['number']\n complement = request.form['complement']\n fname = request.form['fname']\n lname = request.form['lname']\n cpf = request.form['cpf']\n pwd = request.form['password']\n\n # Check if already exists user with the form's e-mail\n check_email = User.query.filter_by(email=email).first()\n if(check_email):\n if(check_email.email != user.email):\n return render_template('admin/profile.html',\n email_error=True)\n else:\n pass\n\n # verify's password\n if(user.verify_password(pwd)):\n try:\n user.cep = cep\n if(user.cep == cep):\n if(user.check_cpf() is True):\n user.email = email\n user.number = number\n user.complement = complement\n user.fname = fname\n user.lname = lname\n user.cpf = cpf\n user.set_address()\n db.session.commit()\n logout_user()\n return redirect(url_for('admin.index'))\n else:\n raise ValueError('Valor de CEP inválido...')\n except Exception:\n return render_template('admin/profile.html',\n error=True)\n else:\n return render_template('admin/profile.html',\n error=True)\n\n\n# PRODUCT\n\n\n# Index Products\n@admin.route('/products', methods=['GET'])\ndef admin_products():\n if (request.method == 'GET'):\n return render_template('admin/products/products.html')\n\n\n# READ all products\n@admin.route('/products/list', methods=['GET'])\ndef list_products():\n if (request.method == 'GET'):\n all_products = Product.query.all()\n return 
render_template('admin/products/list.html',\n all_products=all_products)\n\n\n# URL to delete a product by it's id\n@admin.route('/products/delete/<id_product>', methods=['GET'])\ndef delete_product(id_product):\n if (request.method == 'GET'):\n product = Product.query.get(id_product)\n if (product):\n os.remove(f\"{UPLOAD_FOLDER}/Product/{product.img}\")\n db.session.delete(product)\n db.session.commit()\n return redirect('/admin/products')\n else:\n return redirect(url_for('admin.list_products'))\n\n\n# Add new product\n@admin.route('/products/add', methods=['GET', 'POST'])\ndef add_product():\n if (request.method == 'GET'):\n return render_template('admin/products/add.html')\n if (request.method == 'POST'):\n name = request.form['name']\n description = request.form['description']\n size = request.form['size']\n price = request.form['price']\n img = request.files['img']\n if(img):\n img_filename = secure_filename(img.filename)\n\n # Check if there's a valid extension\n check_ext = img_filename.split('.')\n if(check_ext[-1] not in ALLOWED_EXT):\n return render_template('admin/products/add.html',\n ext_error=True)\n\n # Check if already exists an img with same name\n check_img = Product.query.filter_by(img=img_filename).first()\n if(check_img):\n return render_template('admin/products/add.html',\n upload_error=True)\n\n # Save img inside UPLOAD_FOLDER\n img.save(os.path.join(f\"{UPLOAD_FOLDER}/Product\", img_filename))\n\n # Check if price has 2 decimals\n check_decimal = list(price.split(\".\"))[-1]\n if(len(check_decimal) < 2):\n price += \"0\"\n new_product = Product(name=name,\n description=description,\n size=size,\n price=price,\n img=img_filename)\n\n # Checks if already exists a product with same name\n check_product = Product.query.filter_by(name=name).first()\n if (check_product):\n return render_template('admin/products/add.html',\n error=True)\n else:\n db.session.add(new_product)\n db.session.commit()\n return redirect('/admin/products')\n\n return render_template('admin/products/add.html',\n ext_error=True)\n\n\n# UPDATE a product\n@admin.route('/products/change/<id_product>', methods=['GET', 'POST'])\ndef change_product(id_product):\n product = Product.query.get(id_product)\n if(product):\n if (request.method == 'GET'):\n return render_template('admin/products/update.html',\n product=product)\n if (request.method == 'POST'):\n name = request.form['name']\n description = request.form['description']\n size = request.form['size']\n price = request.form['price']\n\n # Check if price has 2 decimals\n check_decimal = list(price.split(\".\"))[-1]\n if(len(check_decimal) < 2):\n price += \"0\"\n\n # Check if already exists a product with that name\n check_product = Product.query.filter_by(name=name).first()\n if(check_product):\n if(product.name != check_product.name):\n return render_template('admin/products/update.html',\n error=True, product=product)\n\n # Get img and set a secure_filename\n img = request.files['img']\n if(img):\n img_filename = secure_filename(img.filename)\n\n # Check if there's a valid extension\n check_ext = img_filename.split('.')\n if(check_ext[-1] not in ALLOWED_EXT):\n return render_template('admin/products/update.html',\n ext_error=True,\n product=product)\n\n # Check if already exists an img with same name\n check_img = Product.query.filter_by(img=img_filename).first()\n if(check_img):\n if(img_filename != check_img.img):\n return render_template('admin/products/update.html',\n upload_error=True,\n product=product)\n\n # Update new_img inside UPLOAD_FOLDER\n try:\n 
os.remove(f\"{UPLOAD_FOLDER}/Product/{product.img}\")\n except Exception:\n pass\n img.save(os.path.join(\n f\"{UPLOAD_FOLDER}/Product\", img_filename))\n\n # Update image's name in product's column\n product.img = img_filename\n product.name = name\n product.description = description\n product.size = size\n product.price = price\n db.session.commit()\n return redirect('/admin/products')\n return render_template('admin/products/update.html',\n ext_error=True,\n product=product)\n\n\n# THERAPY\n\n\n# Index Therapies\n@admin.route('/therapies', methods=['GET'])\ndef admin_therapies():\n if (request.method == 'GET'):\n return render_template('admin/therapies/therapies.html')\n\n\n# READ all therapies\n@admin.route('/therapies/list', methods=['GET'])\ndef list_therapies():\n if (request.method == 'GET'):\n all_therapies = Therapy.query.all()\n return render_template('admin/therapies/list.html',\n all_therapies=all_therapies)\n\n\n# ADD new therapy\n@admin.route('/therapies/add', methods=['GET', 'POST'])\ndef add_therapy():\n if (request.method == 'GET'):\n return render_template('admin/therapies/add.html')\n if (request.method == 'POST'):\n name = request.form['name']\n description = request.form['description']\n price = request.form['price']\n img = request.files['img']\n if(img):\n img_filename = secure_filename(img.filename)\n\n # Check if there's a valid extension\n check_ext = img_filename.split('.')\n if(check_ext[-1] not in ALLOWED_EXT):\n return render_template('admin/therapies/add.html',\n ext_error=True)\n\n # Check if already exists an img with same name\n check_img = Therapy.query.filter_by(img=img_filename).first()\n if(check_img):\n if(img_filename != check_img.img):\n return render_template('admin/therapies/add.html',\n upload_error=True)\n\n # Save img inside UPLOAD_FOLDER\n img.save(os.path.join(f\"{UPLOAD_FOLDER}/Therapy\", img_filename))\n\n # Check if price has 2 decimals\n check_decimal = list(price.split(\".\"))[-1]\n if(len(check_decimal) < 2):\n price += \"0\"\n\n new_therapy = Therapy(name=name,\n description=description,\n price=price,\n img=img_filename)\n # Checks if already exists a therapy with same name\n check_therapy = Therapy.query.filter_by(name=name).first()\n if (check_therapy):\n return render_template('admin/therapies/add.html',\n error=True)\n else:\n db.session.add(new_therapy)\n db.session.commit()\n return redirect('/admin/therapies')\n\n return render_template('admin/therapies/add.html',\n ext_error=True)\n\n\n# DELETE a therapy\n@admin.route('/therapies/delete/', methods=['GET'])\ndef delete_therapy(id_therapy):\n if (request.method == 'GET'):\n therapy = Therapy.query.get(id_therapy)\n if (therapy):\n os.remove(f\"{UPLOAD_FOLDER}/Therapy/{therapy.img}\")\n db.session.delete(therapy)\n db.session.commit()\n return redirect('/admin/therapies')\n else:\n return redirect('/admin/therapies')\n\n\n# UPDATE a therapy\n@admin.route('/therapies/change/', methods=['GET', 'POST'])\ndef change_therapy(id_therapy):\n therapy = Therapy.query.get(id_therapy)\n if(therapy):\n if(request.method == 'GET'):\n return render_template('admin/therapies/update.html',\n therapy=therapy)\n if(request.method == 'POST'):\n name = request.form['name']\n description = request.form['description']\n price = request.form['price']\n\n # Check if price has 2 decimals\n check_decimal = list(price.split(\".\"))[-1]\n if(len(check_decimal) < 2):\n price += \"0\"\n\n # Check if already exists a therapy with that name\n check_therapy = Therapy.query.filter_by(name=name).first()\n 
if(check_therapy):\n if(therapy.name != check_therapy.name):\n return render_template('admin/therapies/update.html',\n error=True, therapy=therapy)\n\n # Get img and set a secure_filename\n img = request.files['img']\n if(img):\n img_filename = secure_filename(img.filename)\n\n # Check if there's a valid extension\n check_ext = img_filename.split('.')\n if(check_ext[-1] not in ALLOWED_EXT):\n return render_template('admin/therapies/update.html',\n ext_error=True,\n therapy=therapy)\n\n # Check if already exists an img with same name\n check_img = Therapy.query.filter_by(img=img_filename).first()\n if(check_img):\n if(img_filename != check_img.img):\n return render_template('admin/therapies/update.html',\n upload_error=True,\n therapy=therapy)\n\n # Update new_img inside UPLOAD_FOLDER\n try:\n os.remove(f\"{UPLOAD_FOLDER}/Therapy/{therapy.img}\")\n except Exception:\n pass\n img.save(os.path.join(\n f\"{UPLOAD_FOLDER}/Therapy\", img_filename))\n\n # Update image's name in therapy's column\n therapy.img = img_filename\n therapy.name = name\n therapy.description = description\n therapy.price = price\n db.session.commit()\n return redirect('/admin/therapies')\n return render_template('admin/therapies/update.html',\n ext_error=True,\n therapy=therapy)\n\n\n# ORDERS\n\n\n# List orders\n@admin.route('/orders', methods=['GET'])\ndef orders():\n user = current_user\n if(user):\n if(request.method == 'GET'):\n all_orders = Order.query.all()\n return render_template('admin/orders.html',\n all_orders=all_orders)\n\n return render_template('admin/orders.html',\n error=True)\n\n\n# Display a certain order\n@admin.route('/order/<order_id>', methods=['GET'])\ndef get_order(order_id):\n user = current_user\n if(user):\n if(request.method == 'GET'):\n order = Order.query.get(order_id)\n # set a list of products and therapies objects\n user_products = []\n user_therapies = []\n for product in order.products:\n item = Product.query.get(product)\n user_products.append(item)\n for therapy in order.therapies:\n item = Therapy.query.get(therapy)\n user_therapies.append(item)\n\n owner = User.query.get(order.id_user)\n return render_template('admin/close_order.html',\n order=order,\n owner=owner,\n user_products=user_products,\n user_therapies=user_therapies)\n\n\n# Close an order\n@admin.route('/order/delete/<order_id>', methods=['GET'])\ndef delete_order(order_id):\n user = current_user\n if(user):\n if(request.method == 'GET'):\n order = Order.query.get(order_id)\n db.session.delete(order)\n db.session.commit()\n\n return redirect(url_for('admin.orders'))\n\n\n# USERS\n\n\n# List all users\n@admin.route('/users', methods=['GET', 'POST'])\ndef users():\n if(request.method == 'GET'):\n all_users = User.query.all()\n return render_template('admin/users.html',\n all_users=all_users)\n\n\n# Delete user\n@admin.route('/users/delete/<user_id>', methods=['GET'])\ndef delete_user(user_id):\n user = current_user\n if(user):\n if(request.method == 'GET'):\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(url_for('admin.users'))\n","repo_name":"terrotar/OPE","sub_path":"app/blueprints/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":18201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"17787504481","text":"import random \nfrom torchaudio.sox_effects import apply_effects_tensor\n\ndef crop_segment(tensor, tgt_dur, sample_rate=16000):\n src_dur = len(tensor) / sample_rate\n random_shift = random.uniform(0, src_dur - tgt_dur)\n audio_tensor, _ = apply_effects_tensor(\n tensor.unsqueeze(0),\n sample_rate,\n [\n [\"pad\", f\"{tgt_dur}\", f\"{tgt_dur}\"],\n [\n \"trim\",\n f\"{tgt_dur + random_shift}\",\n f\"{tgt_dur}\",\n ],\n ],\n )\n return audio_tensor.squeeze(0)","repo_name":"nervjack2/LabTask1","sub_path":"data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"2075362519","text":"#!/usr/bin/python3\n\"\"\"holds places reviews\"\"\"\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, request\nfrom models import storage\nfrom models.review import Review\nfrom models.user import User\nfrom models.place import Place\n\n\n@app_views.route('/places/<place_id>/reviews', strict_slashes=False)\ndef all_reviews(place_id):\n \"\"\"Retrieves the list of all Review objects of a Place\"\"\"\n all_reviews = []\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n for review in place.reviews:\n all_reviews.append(review.to_dict())\n return (jsonify(all_reviews))\n\n\n@app_views.route('/reviews/<review_id>', methods=['GET'],\n strict_slashes=False)\ndef get_review(review_id):\n \"\"\"Retrieves a Review object\"\"\"\n review_to_get = storage.get(Review, review_id)\n if review_to_get is None:\n abort(404)\n return jsonify(review_to_get.to_dict())\n\n\n@app_views.route('/reviews/<review_id>', methods=['DELETE'],\n strict_slashes=False)\ndef delete_review(review_id):\n \"\"\"Deletes a Review object\"\"\"\n review_to_delete = storage.get(Review, review_id)\n if review_to_delete is None:\n abort(404)\n review_to_delete.delete()\n storage.save()\n return jsonify({})\n\n\n@app_views.route('/places/<place_id>/reviews', methods=['POST'],\n strict_slashes=False)\ndef post_review(place_id):\n \"\"\"Creates a Review\"\"\"\n review_to_post = request.get_json()\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n if review_to_post is None:\n return (jsonify({\"error\": \"Not a JSON\"}), 400)\n user_id = review_to_post.get(\"user_id\")\n if user_id is None:\n return (jsonify({\"error\": \"Missing user_id\"}), 400)\n user_found = storage.get(User, review_to_post['user_id'])\n if user_found is None:\n abort(404)\n review_text = review_to_post.get(\"text\")\n if review_text is None:\n return (jsonify({\"error\": \"Missing text\"}), 400)\n new = Review(**review_to_post)\n storage.new(new)\n storage.save()\n return (jsonify(new.to_dict()), 201)\n\n\n@app_views.route('/reviews/<review_id>', methods=['PUT'],\n strict_slashes=False)\ndef update_review(review_id):\n \"\"\" Updates a Review object\"\"\"\n old_review = storage.get(Review, review_id)\n if old_review is None:\n abort(404)\n req_body = request.get_json()\n if req_body is None:\n return (jsonify({\"error\": \"Not a JSON\"}), 400)\n to_ignore = [\"id\", \"user_id\", \"place_id\", \"created_at\", \"updated_at\"]\n for key, value in req_body.items():\n if key not in to_ignore:\n setattr(old_review, key, value)\n\n old_review.save()\n storage.save()\n return (jsonify(old_review.to_dict()), 200)\n","repo_name":"salahbesbes/AirBnB_clone_v3","sub_path":"api/v1/views/places_reviews.py","file_name":"places_reviews.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"2178513173","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport os\nimport subprocess\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))\nfrom file_tools import get_file_paths_from_directory\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/filesystem-copy-to-directory\n \"\"\"\n def __init__(self, input_dir, output_dir, **kwargs):\n other_args = kwargs.get(str('other_args'), '')\n\n # Process files and directories.\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n\n if os.path.isfile(item_path):\n self.extract_strings(item_path, output_dir, other_args)\n\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.extract_strings(contained_file, output_dir, other_args)\n\n @staticmethod\n def extract_strings(input_file, output_dir, other_args):\n input_file_name, input_extension = os.path.splitext(os.path.basename(input_file))\n output_file = os.path.join(output_dir, input_file_name + '.txt')\n\n command = '/usr/bin/strings '\n if len(other_args) > 0:\n command += other_args + ' '\n command += '\"%s\"' % input_file\n\n print('Calling: %s' % command)\n\n with open(output_file, 'w') as file_handler:\n exit_code = subprocess.call(command, shell=True, stdout=file_handler)\n\n if exit_code > 0:\n sys.exit(exit_code)\n","repo_name":"Averroes/droppy-workspace","sub_path":"Tasks/FileSystem.Strings/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"37223852001","text":"import operator\nfrom datetime import datetime\n\nimport phonenumbers\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ContentType\n\nfrom data.config import ADMINS\nfrom loader import dp, test, db, results\nfrom states.answer_state import AnswersState\nfrom states.end import EndState\nfrom states.image import ImageState\nfrom states.start import StartState\nfrom states.time import TimeState\nfrom phonenumbers import format_number\n\n\n@dp.message_handler(text=\"/new_test\", user_id=ADMINS)\nasync def new_test(message: types.Message):\n _d = db.select_user(id=message.from_user.id)\n if _d is None:\n await message.reply(\"Bazadan sizni topa olmadik. Ro'yxatdan o'ting, /start comandasining ustiga bosish orqali!\")\n return\n await message.reply(\"Avvalo test rasmlarini jo'nating, to'xtatish uchun stop so'zini yozing\")\n await ImageState.waiting.set()\n\n\n@dp.message_handler(user_id=ADMINS)\nasync def admin_test(message: types.Message):\n _d = db.select_user(id=message.from_user.id)\n if _d is None:\n await message.reply(\"Bazadan sizni topa olmadik. Ro'yxatdan o'ting, /start comandasining ustiga bosish orqali!\")\n return\n _end = False\n txt = message.text\n if txt[-1] == \".\":\n _end = True\n txt = txt[:-1]\n print(f'txtttttttttttttttttttttttttttttttttttttttttttttttt {txt}a')\n _data = results.select_all_users()\n d = test.select_user(id=txt)\n if _data is not None and d is not None:\n data = []\n for d in _data:\n if str(d[5]) == txt:\n print('in')\n data.append(d)\n print(f'data: {data}')\n _sort = {}\n for res in data:\n _sort[str(res[0])] = res[1]\n _sort = dict(sorted(_sort.items(), reverse=True, key=operator.itemgetter(1)))\n result = \"\"\n t = 0\n for k, v in _sort.items():\n t += 1\n r = results.select_user(id=k)\n user = db.select_user(id=int(r[4]))\n num = format_number(phonenumbers.parse(user[2], \"UZ\"), phonenumbers.PhoneNumberFormat.INTERNATIONAL)\n percent = str(100 * r[1] / len(str(r[8]).split(',')))\n # time = ((datetime.fromtimestamp(r[7]))-(datetime.fromtimestamp(r[6]))).seconds\n # __t = datetime.fromtimestamp(time).strftime(\"%H:%M:%S\")\n p1 = f\"{t}: 👤{user[1]}\\n✅{r[1]}, ❌{r[2]}, ℹ️{r[3]}, {percent[:4]}%\"\n p2 = \"\\n\\n\" if _end else f\"\\n🅰️@{user[5]} ☎️{str(num)[5:]}\\n\\n\"\n result += f\"{p1}{p2}\"\n if len(result) != 0:\n await message.reply(result)\n else:\n await message.reply(\"Hali bu testni heckkim ishlamagan\")\n else:\n await message.reply(\"Test topilmadi\")\n\n\n@dp.message_handler(state=ImageState.waiting, content_types=ContentType.PHOTO)\nasync def set_mages(message: types.Message, state: FSMContext):\n data = await state.get_data()\n id = f\"{message.photo[-1].file_id},\"\n if data.get('data') is not None:\n print(data.get('data'))\n res = data.get('data') + id\n else:\n res = id\n await state.set_data({\n \"data\": res\n })\n\n\n@dp.message_handler(state=ImageState.waiting, content_types=ContentType.TEXT)\nasync def close(message: types.Message, state: FSMContext):\n if message.text.lower() == \"stop\":\n count = len(test.select_all_users()) + 1\n d = (await state.get_data(\"data\")).get(\"data\")\n print(f\"count {count}\\n\\n\\n\\n\\n\\n\\n\")\n test.add_user(id=count, media=d, answers=\"No data\", start=datetime.now(), end=datetime.now(), t=90)\n await AnswersState.waiting.set()\n await message.reply(\n \"Endi esa javoblarni kiritib chiqamiz, ketma ketlikda kalitlarni kiritib keta verasiz. 
abdcebcd tariqasida.\")\n else:\n await message.answer(\"To'xtatish uchun stop so'zini yozing\")\n\n\n@dp.message_handler(state=ImageState.waiting)\nasync def type_error(message: types.Message):\n await message.reply(\"Rasmdan boshqa file turi qabul qilinmaydi\")\n\n\n@dp.message_handler(state=AnswersState.waiting, content_types=ContentType.TEXT)\nasync def answers(message: types.Message, state: FSMContext):\n res = message.text.lower().replace(\"a\", \"\").replace(\"b\", \"\").replace(\"c\", \"\").replace(\"d\", \"\").replace(\"e\", \"\")\n print(f\"res: {res}\")\n if len(res) == 0:\n count = len(test.select_all_users())\n print(f\"count {count}\\n\\n\\n\\n\\n\\n\\n\")\n test.update_user_answers(answers=message.text, id=count)\n await message.reply(f\"Tesni boshlanish vaqtini kiriting, misol uchun: 17:35\")\n await StartState.waiting.set()\n else:\n await message.answer(\"Testda faqat 'a,b,c,d,e' kalitlarini kiritish mumkin\")\n\n\n@dp.message_handler(state=StartState.waiting, content_types=ContentType.TEXT)\nasync def start(message: types.Message, state: FSMContext):\n count = len(test.select_all_users())\n print(f\"count {count}\\n\\n\\n\\n\\n\\n\\n\")\n await EndState.waiting.set()\n t = message.text.split(\":\")\n hour = int(t[0])\n minute = int(t[1])\n st = datetime.today().replace(hour=hour, minute=minute)\n print(st.timestamp())\n test.update_user_start(id=count, time=st.timestamp())\n await message.reply(\"Tesni tugash vaqtini kiriting, misol uchun: 20:00\")\n\n\ndef hello():\n print(\"Salmcha\\n\\n\\n\\n\\n\")\n\n\n@dp.message_handler(state=EndState.waiting, content_types=ContentType.TEXT)\nasync def end(message: types.Message, state: FSMContext):\n count = len(test.select_all_users())\n print(f\"count {count}\\n\\n\\n\\n\\n\\n\\n\")\n await TimeState.waiting.set()\n t = message.text.split(\":\")\n hour = int(t[0])\n minute = int(t[1])\n st = datetime.today().replace(hour=hour, minute=minute)\n test.update_user_end(id=count, time=st.timestamp())\n await message.reply(\"Tesni davom etish daqiqasini kiriting, misol uchun 90\")\n\n\n@dp.message_handler(state=TimeState.waiting, content_types=ContentType.TEXT)\nasync def duration(message: types.Message, state: FSMContext):\n count = len(test.select_all_users())\n print(f\"count {count}\\n\\n\\n\\n\\n\\n\\n\")\n test.update_user_time(id=count, time=int(message.text))\n t = test.select_user(id=count)\n sta = datetime.fromtimestamp(t[3]).strftime(\"%H:%M\")\n end = datetime.fromtimestamp(t[4]).strftime(\"%H:%M\")\n print(t)\n print(sta)\n print(end)\n await message.reply(\n f\"🆔Test kodi {count}\\n🕔Test vaqti: {sta}-{end}\\n⏳Vaqt: {message.text}\\n📋Savollar soni: {len(t[2])}\\n\\n@sardor_math_test_bot - testni shu yerda yeching!\")\n await state.finish()\n","repo_name":"IslomjonovAbdulazim/sardor-math-test","sub_path":"handlers/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42069914085","text":"import sys\nimport os\nimport h5py\nimport numpy as np\nimport NeuroAnalysisTools.HighLevel as hl\nimport NeuroAnalysisTools.core.FileTools as ft\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\n\n\nlam = 0.05\nplot_chunk_size = 5000\nprocess_num = 5\n\n\ndef plot_traces_chunks(traces, labels, chunk_size, roi_ind):\n \"\"\"\n\n :param traces: np.array, shape=[trace_type, t_num]\n :param labels:\n :param chunk_size:\n :param figures_folder:\n :param roi_ind:\n :return:\n \"\"\"\n\n t_num = traces.shape[1]\n chunk_num = t_num // chunk_size\n\n chunks = []\n for chunk_ind in range(chunk_num):\n chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size])\n\n if t_num % chunk_size != 0:\n chunks.append([chunk_num * chunk_size, t_num])\n\n v_max = np.amax(traces)\n v_min = np.amin(traces)\n\n fig = plt.figure(figsize=(75, 20))\n fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind))\n for chunk_ind, chunk in enumerate(chunks):\n curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1)\n for trace_ind in range(traces.shape[0]):\n curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind])\n\n curr_ax.set_xlim([0, chunk_size])\n curr_ax.set_ylim([v_min, v_max * 1.2])\n curr_ax.legend()\n\n return fig\n\ndef plot_traces_for_multi_process(params):\n\n curr_traces, plot_chunk_size, roi_ind, figures_folder = params\n\n print('roi_{:04d}'.format(roi_ind))\n\n curr_fig = plot_traces_chunks(traces=curr_traces,\n labels=['center', 'surround', 'subtracted'],\n chunk_size=plot_chunk_size,\n roi_ind=roi_ind)\n curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind)))\n curr_fig.clear()\n plt.close(curr_fig)\n\ndef run():\n curr_folder = os.path.dirname(os.path.realpath(__file__))\n os.chdir(curr_folder)\n\n data_f = h5py.File('rois_and_traces.hdf5')\n traces_raw = data_f['traces_center_raw'].value\n traces_srround = data_f['traces_surround_raw'].value\n\n traces_subtracted = np.zeros(traces_raw.shape, np.float32)\n ratio = np.zeros(traces_raw.shape[0], np.float32)\n err = np.zeros(traces_raw.shape[0], np.float32)\n\n for i in range(traces_raw.shape[0]):\n curr_trace_c = traces_raw[i]\n curr_trace_s = traces_srround[i]\n curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam)\n print(\"roi_%s \\tr = %.4f; error = %.4f.\" % (ft.int2str(i, 5), curr_r, curr_err))\n traces_subtracted[i] = curr_trace_sub\n ratio[i] = curr_r\n err[i] = curr_err\n\n print('\\nplotting neuropil subtraction results ...')\n figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam)\n if not os.path.isdir(figures_folder):\n os.makedirs(figures_folder)\n\n params = []\n for roi_ind in range(traces_raw.shape[0]):\n\n curr_traces = np.array([traces_raw[roi_ind], traces_srround[roi_ind], traces_subtracted[roi_ind]])\n\n params.append((curr_traces, plot_chunk_size, roi_ind, figures_folder))\n\n p = Pool(process_num)\n p.map(plot_traces_for_multi_process, params)\n\n # wait for keyboard abortion\n # msg = raw_input('Do you want to save? (y/n)\\n')\n # while True:\n # if msg == 'y':\n # break\n # elif msg == 'n':\n # sys.exit('Stop process without saving.')\n # else:\n # msg = raw_input('Do you want to save? 
(y/n)\\n')\n\n data_f['traces_center_subtracted'] = traces_subtracted\n data_f['neuropil_r'] = ratio\n data_f['neuropil_err'] = err\n\n data_f.close()\n\nif __name__ == \"__main__\":\n run()","repo_name":"zhuangjun1981/NeuroAnalysisTools","sub_path":"NeuroAnalysisTools/scripts/analysis_pipeline_movie/old/within_plane_folder/160_get_neuropil_subtracted_traces.py","file_name":"160_get_neuropil_subtracted_traces.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"}
+{"seq_id":"12154604212","text":"import logging\r\nfrom logging.handlers import RotatingFileHandler\r\nimport requests\r\nimport json\r\nimport time\r\nimport os\r\nimport sys\r\nimport names\r\nimport uuid\r\nimport random as r\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning #used to allow insecure https\r\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning) #used to allow insecure https\r\n\r\n\r\n \r\n ####################### Logging Configuration #######################\r\nrfh = logging.handlers.RotatingFileHandler(\r\n\r\n filename=('C:/CallAnnotate/Exportlogs.txt'),\r\n mode='a',\r\n maxBytes=5*1024*1024,\r\n backupCount=2,\r\n encoding=None,\r\n delay=0\r\n\r\n)\r\nlogging.basicConfig(\r\n level=logging.DEBUG,\r\n format=\"%(asctime)s %(name)-5s %(levelname)-5s %(message)s\",\r\n datefmt=\"%y-%m-%d %H:%M:%S\",\r\n handlers=[\r\n rfh\r\n ]\r\n)\r\n\r\nlogger = logging.getLogger('main')\r\n\r\nlogger.debug(\"test\")\r\n\r\n\r\nif os.path.exists(\"C:/CallAnnotate/calls.text\"):\r\n os.remove(\"C:/CallAnnotate/calls.text\")\r\nelse:\r\n print(\"calls.txt is not present - proceeding\")\r\n\r\nif os.path.exists(\"C:/CallAnnotate/calls.json\"):\r\n os.remove(\"C:/CallAnnotate/calls.json\")\r\nelse:\r\n print(\"calls.json is not present - proceeding\")\r\n\r\n\r\n\r\n####################### Recorder Configuration #######################\r\nwith open('C:/CallAnnotate/config.json') as json_file:\r\n data = json.load(json_file)\r\nipAddress = (data['Recorder']['ipAddress'])\r\nu = (data['Recorder']['login'])\r\np = (data['Recorder']['password'])\r\nskip = (data['Search']['resultsToSkip'])\r\nsearchMode = (data['Search']['searchMode'])\r\nstartTime = (data['Search']['startTime'])\r\nendTime = (data['Search']['endTime'])\r\n\r\n####################### Get Token #######################\r\nurl = \"http://%s:1480/api/v1/sessions/login\" % (ipAddress)\r\npayload = \"\"\r\nheaders = {\r\n 'username': u,\r\n 'password': p\r\n}\r\nresponse = requests.request(\"POST\", url, headers=headers, data=payload)\r\nresponse.json()\r\njsonResponse = response.json()\r\nx = (jsonResponse[\"authToken\"])\r\nprint(x)\r\n\r\n####################### Annotate #######################\r\n\r\nfor callId in range(2366,91040):\r\n loopstart1 = time.perf_counter()\r\n username = names.get_full_name()\r\n #callIdvalue = callId\r\n ph_no = []\r\n ph_no.append(r.randint(6, 9))\r\n for i in range(1, 10):\r\n ph_no.append(r.randint(0, 9)) \r\n for i in ph_no:\r\n print(i, end=\"\")\r\n telephoneNumber = i\r\n \r\n url = \"http://%s:1480/api/v1/annotation/call\" % (ipAddress) #url with callId variable from json\r\n payload= json.dumps({\r\n \"callID\": \"%s\" % (callId),\r\n \"annotationFields\": [\r\n {\r\n \"fieldName\": \"96\",\r\n \"fieldData\": \"%s\" % (username)\r\n },\r\n {\r\n \"fieldName\": \"20\",\r\n \"fieldData\": \"%s\" % (telephoneNumber)\r\n },\r\n {\r\n \"fieldName\": \"1001\",\r\n \"fieldData\": \"%s\" % str(uuid.uuid4())\r\n },\r\n {\r\n \"fieldName\": \"221\",\r\n \"fieldData\": \"%s\" % str(uuid.uuid4())\r\n }\r\n ]\r\n })\r\n headers = {\r\n 'authToken': x,\r\n 'Content-Type': 'application/json'\r\n }\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n time.sleep(0.1) # Sleep for 1 second\r\n\r\n\r\n\r\nfinishalltimer = time.perf_counter()\r\n#logging.debug(f'Total time to complete {round(finishalltimer-startalltimer, 2)} second(s)')\r\n","repo_name":"adamhacker90/python-projects","sub_path":"Pull 
Calls/callannotate.py","file_name":"callannotate.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"21490469799","text":"import functools\nimport inspect\nimport pprint\nimport traceback\nfrom datetime import datetime\n\nfrom connect.client import ClientError\nfrom connect.eaas.core.responses import BackgroundResponse\n\n\ndef _send_notification(msg, kwargs):\n if 'logger' in kwargs:\n kwargs['logger'].info(msg)\n\n\ndef safe_client(\n response_func=lambda x: BackgroundResponse.fail(x),\n):\n def decorator(func):\n if inspect.iscoroutinefunction(func):\n @functools.wraps(func)\n async def wrapper(*args, **kwargs):\n try:\n return await func(*args, **kwargs)\n except ClientError as error:\n msg = (\n f'{error}\\n'\n f'Exception occured at: {datetime.now()}\\n '\n f'{pprint.pformat(traceback.format_exc())}',\n )\n _send_notification(msg, kwargs)\n if response_func:\n return response_func(msg)\n raise\n else:\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ClientError as error:\n msg = (\n f'{error}\\n'\n f'Exception occured at: {datetime.now()}\\n '\n f'{pprint.pformat(traceback.format_exc())}',\n )\n _send_notification(msg, kwargs)\n if response_func:\n return response_func(msg)\n raise\n return wrapper\n\n return decorator\n","repo_name":"cloudblue/eaas-e2e-events-fulfillment","sub_path":"connect_ext/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"28925766906","text":"import random\narray = [random.randint(1, 5) for n in range(0, 10)]\n\ndef histogram(array):\n for n in range(1, 6):\n ocurrencies = array.count(n)\n pin = '*' * ocurrencies\n print(f'{n}: {pin}')\n\nif __name__ == '__main__':\n print(array)\n histogram(array)","repo_name":"Daniel-Ortiz1210/code-challenges","sub_path":"sixth.py","file_name":"sixth.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"23574757030","text":"import datetime\nimport os\nimport subprocess\n\nimport numpy\nfrom scipy.stats import norm\n\nfrom . import romannumerals\n\n\n# ToDo: Bring back scale bar\n# ToDo: Add option for solid fill of vectors\n\n\ndef roundto(num, nearest):\n \"\"\"\n Rounds :param:`num` to the nearest increment of :param:`nearest`\n \"\"\"\n return int((num + (nearest / 2)) // nearest * nearest)\n\n\ndef convert_chromosome_name(chrom_string, dialect='ucsc'):\n \"\"\"\n Try to auto-detect chromosome number and convert it to the specified \"dialect\".\n\n Valid dialects are \"ucsc\", \"ensembl\" and \"yeast\".\n\n :param chrom_string:\n :param dialect:\n :return:\n \"\"\"\n try:\n chrom_string = str(romannumerals.roman_to_int(chrom_string))\n except ValueError:\n pass\n\n if dialect == 'ensembl':\n if chrom_string == 'chrM':\n return 'dmel_mitochondrion_genome'\n elif chrom_string[:3].lower() == 'chr':\n return chrom_string[3:]\n else:\n return chrom_string\n elif dialect == 'ucsc':\n if chrom_string == 'dmel_mitochondrion_genome':\n return 'chrM'\n elif chrom_string[:3].lower() == 'chr':\n return chrom_string\n else:\n return 'chr{}'.format(chrom_string)\n elif dialect == 'yeast':\n if chrom_string[:3].lower() == 'chr':\n chrom_string = chrom_string[3:]\n try:\n return romannumerals.int_to_roman(int(chrom_string))\n except ValueError:\n return chrom_string\n else:\n raise ValueError('Unknown dialect {}'.format(dialect))\n\n\ndef binary_search_tag_file(tag_filename, search_target):\n \"\"\"\n Find the offset (in bytes) in :param:`tag_filename` that corresponds\n to the start of the first tag that is equal to or greater than :param:`search_target`.\n\n If none of the reads have a start position greater than :param:`search_target`,\n return None.\n\n Note that positions in tag files have a 1-based index.\n \"\"\"\n\n def get_read_start(file_offset):\n tag_file.seek(file_offset)\n if file_offset > 0:\n _ = tag_file.readline() # read forward to get to a line start\n this_line = tag_file.readline().strip()\n if tag_file.tell() >= filesize:\n # We've reached the end of the file and the reads are still upstream of the target\n return None\n else:\n return int(this_line.split('\\t')[1])\n\n filesize = os.path.getsize(tag_filename)\n search_window_start = 0\n search_window_end = filesize - 1\n guess_genomic_start = -1\n guess = int((search_window_start + search_window_end) / 2)\n\n with open(tag_filename, 'rt') as tag_file:\n first_genomic_start = get_read_start(search_window_start)\n # last_genomic_start = get_read_position(search_window_end)\n\n if search_target < first_genomic_start:\n return search_window_start\n\n while search_window_end - search_window_start > 1:\n guess = int((search_window_start + search_window_end) / 2)\n guess_genomic_start = get_read_start(guess)\n\n if guess_genomic_start == None:\n return None\n\n # print(search_window_start, guess, search_window_end, guess_genomic_start)\n\n if guess_genomic_start < search_target:\n # print('\\ttoo low!')\n search_window_start = guess\n\n elif guess_genomic_start > search_target:\n search_window_end = guess\n\n # print('\\ttoo high!')\n else:\n # print('\\tjust right!')\n break\n\n if guess_genomic_start == -1:\n return None\n\n if guess_genomic_start < search_target:\n guess += 1\n\n tag_file.seek(guess)\n _ = tag_file.readline()\n guess = tag_file.tell()\n\n return guess\n\n\ndef bgzip_gff(gff3_fname, bgzipped_fname):\n \"\"\"\n Compress a GFF3 file in block-gzip format (requires that bgzip be accessible on 
the current path).\n\n If :param gff3_fname: ends with '.gz' assumes that the file is gzipped, otherwise assumes it is uncompressed.\n\n :param gff3_fname:\n :param bgzipped_fname:\n :return:\n \"\"\"\n if bgzipped_fname == gff3_fname:\n log_print('Destination and source file cannot have the same name!')\n return\n\n cmd_line = '{} {} | sort -k1,1 -k4,4n | bgzip > {}'.format(('cat', 'zcat')[gff3_fname.endswith('.gz')], gff3_fname,\n bgzipped_fname)\n try:\n assert os.path.isfile(gff3_fname) # needed since no error occurs otherwise\n subprocess.check_call(cmd_line, shell=True)\n\n except subprocess.CalledProcessError as cpe:\n log_print('Unsuccessful. Got return code {}'.format(cpe.returncode))\n\n except AssertionError:\n log_print('{} not found!'.format(gff3_fname))\n\n else:\n log_print('Successfully generated block-gzipped file {} from {}'.format(bgzipped_fname, gff3_fname))\n\n\ndef generate_tabix_index(target_fname):\n \"\"\"\n Index :param target_fname: with tabix. Requires that the directory in which :param:target_fname: resides is\n writeable.\n\n :param target_fname:\n :return:\n \"\"\"\n cmd_line = 'tabix -f -p gff {}'.format(target_fname)\n try:\n return_code = subprocess.check_call(cmd_line, shell=True)\n except subprocess.CalledProcessError as cpe:\n log_print('Unsuccessful. Got return code {}'.format(cpe.returncode))\n else:\n log_print('Successfully indexed block-gzipped file {}'.format(target_fname))\n\n\ndef pretty_now():\n \"\"\"\n Returns the current date/time in a nicely formatted string (without decimal seconds)\n \"\"\"\n return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%b-%d %H:%M:%S')\n\n\ndef log_print(message, tabs=1):\n \"\"\"\n Print a chunk of text preceded by a timestamp and an optional number of tabs (default 1).\n\n :param message:\n :param tabs:\n :return:\n \"\"\"\n print('{}{}{}'.format(pretty_now(), '\\t' * tabs, message))\n\n\ndef gaussian_kernel(sd, sd_cutoff=3, normalize=False):\n \"\"\"\n Generate and return a numpy.Array whose elements are proportional to the PDF of a normal distribution\n having standard deviation :param:`sd`.\n\n :param sd:\n :param sd_cutoff:\n :param normalize:\n :return:\n \"\"\"\n bw = sd_cutoff * sd * 2 + 1\n midpoint = sd_cutoff * sd\n kern = numpy.zeros(bw)\n frozen_rv = norm(scale=sd)\n for i in range(bw):\n kern[i] = frozen_rv.pdf(i - midpoint)\n if normalize:\n kern = kern / kern.max()\n return kern\n\n \ndef add_label(ax, tick, tick_label, axis='x'):\n \"\"\"\n Updates the set of ticks and tick labels for the specified matplotlib.Axes object\n and axis.\n \n If the tick already exists, it's label will be updated. If not, it will be created and labeled\n appropriately.\n \n \"\"\"\n if axis == 'y':\n tick_getter, label_getter = ax.get_yticks, ax.get_yticklabels\n tick_setter, label_setter = ax.set_yticks, ax.set_yticklabels\n else:\n tick_getter, label_getter = ax.get_xticks, ax.get_xticklabels\n tick_setter, label_setter = ax.set_xticks, ax.set_xticklabels\n \n labels = dict(zip(tick_getter(), label_getter()))\n labels[tick] = tick_label\n new_ticks, new_labels = zip(*sorted(labels.items()))\n tick_setter(new_ticks)\n label_setter(new_labels)\n\n \ndef adjust_limits(ax, new_position, axis='y', padding_fraction=0.1):\n \"\"\"\n If necessary adjusts the limits for the specified :param axis: on \n :param ax: to accomodate :param new_position: according to the \n following scheme:\n \n 1. 
Assumes that the current limits are the \n smallest and largest content item minus / plus a padding equal to\n :param padding_fraction: * the span between the smallest\n and largest content item.\n 2. If :param new_position: is beyond the inferred content limits,\n adjust the padding to :param padding_fraction: * the new content\n span, then adjust the plot limits to the new content limits\n minus / plus the new padding. \n \"\"\"\n assert padding_fraction < 0.5, 'padding_fraction must be below 0.5!'\n \n if axis == 'y':\n limit_getter = ax.get_ylim\n limit_setter = ax.set_ylim\n else:\n limit_getter = ax.get_xlim\n limit_setter = ax.set_xlim\n \n current_plot_min, current_plot_max = limit_getter()\n current_plot_span = current_plot_max - current_plot_min\n current_data_span = current_plot_span / (1 + 2 * padding_fraction)\n current_pad = current_data_span * padding_fraction\n current_data_min = current_plot_min + current_pad\n current_data_max = current_plot_max - current_pad\n \n# print(current_plot_min, current_plot_max, current_plot_span)\n# print(current_data_min, current_data_max, current_data_span, current_pad)\n \n if new_position > current_data_max:\n new_data_min = current_data_min\n new_data_max = new_position\n \n elif new_position < current_data_min:\n new_data_min = new_position\n new_data_max = current_data_max\n else:\n # no changes needed\n return\n \n new_data_span = new_data_max - new_data_min\n new_pad = new_data_span * padding_fraction\n new_plot_min = new_data_min - new_pad\n new_plot_max = new_data_max + new_pad\n \n# print(new_data_min, new_data_max, new_data_span, new_pad)\n# print(new_plot_min, new_plot_max)\n\n limit_setter((new_plot_min, new_plot_max)) \n \n \ndef diag_indices(n, k=0):\n \"\"\"\n Return the indices corresponding to the kth diagonal of an n X n array\n in the form of a tuple of (x coords, y coords). \n \n Created since numpy does not provide this functionality.\n \"\"\"\n if k <= 0:\n x_coords = numpy.arange(-k, n)\n y_coords = numpy.arange(0, n + k)\n else:\n x_coords = numpy.arange(0, n - k)\n y_coords = numpy.arange(k, n)\n\n return (x_coords, y_coords) ","repo_name":"phageghost/python-genome-browser","sub_path":"pygbrowse/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":10014,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"27"}
+{"seq_id":"16988692843","text":"from sqlalchemy import Table, Column, Integer, Text, ForeignKey, select, func, insert\n\nfrom db import db_connect, create_tables, metadata\nfrom utils import print_result\n\nengine, connection = db_connect()\n\nproduct = Table(\n \"products\",\n metadata,\n Column(\"product_id\", Integer, primary_key=True),\n Column(\"product_name\", Text, nullable=False),\n)\n\nsale = Table(\n \"sales\",\n metadata,\n Column(\"sale_id\", Integer, nullable=False, primary_key=True),\n Column(\"product_id\", Integer, ForeignKey(product.c.product_id, ondelete=\"CASCADE\", onupdate=\"CASCADE\"),\n nullable=False),\n Column(\"year\", Integer, nullable=False, primary_key=True),\n Column(\"quantity\", Integer, nullable=False),\n Column(\"price\", Integer, nullable=False),\n)\n\ncreate_tables(engine)\n\nnew_products = [\n {\"product_id\": 100, \"product_name\": \"Nokia\"},\n {\"product_id\": 200, \"product_name\": \"Apple\"},\n {\"product_id\": 300, \"product_name\": \"Samsung\"},\n]\n\nnew_sales = [\n {\"sale_id\": 1, \"product_id\": 100, \"year\": 2008, \"quantity\": 10, \"price\": 5000},\n {\"sale_id\": 2, \"product_id\": 100, \"year\": 2009, \"quantity\": 12, \"price\": 5000},\n {\"sale_id\": 7, \"product_id\": 200, \"year\": 2011, \"quantity\": 15, \"price\": 9000},\n]\n\nconnection.execute(insert(product), new_products)\nconnection.execute(insert(sale), new_sales)\nconnection.commit()\n\nquery = (\n select(sale.c.product_id, func.sum(sale.c.quantity).label(\"total_quantity\"))\n .group_by(sale.c.product_id)\n)\nresult = connection.execute(query)\nprint_result(result)\n\nconnection.close()\n","repo_name":"redmonkez12/sqlalchemy-practice","sub_path":"task_19/task_19_core.py","file_name":"task_19_core.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"32592855688","text":"# Lab Exercise 03\r\nprint('Lab Exercise 03 \\n')\r\n\r\n# Setup\r\ncompanies = [\r\n \"Domino's, Ann Arbor, 14400, Food\",\r\n \"Fisher Investments, Camas, 3500, financial\",\r\n \"M&T Bank, Buffalo, 16840, Financial\",\r\n \"Dimensional Insight, Burlington, 102, Tech\",\r\n \"Bloomingdale's, New York, 6500, Retail\",\r\n \"Meijer, Grand Rapids, 70000, Retail\",\r\n \"CIL Management Consultants, Chicago, 189, Consulting\"\r\n]\r\n\r\n# Problem 01 (3 points)\r\n\r\n\r\nlocations = []\r\nfor company in companies:\r\n company = company.split(', ')\r\n if len(company) > 1:\r\n locations.append(company[1])\r\nprint(f\"\\n1. locations = {locations}\")\r\n\r\n# Problem 02 (4 points)\r\n\r\nfinancial_co = []\r\n\r\nfor company in companies:\r\n company = company.split(', ')\r\n if company[3].lower() == 'financial':\r\n financial_co.append(company[0])\r\nprint(f\"\\n2. financial_co = {financial_co}\")\r\n\r\n# Problem 03 (4 points)\r\n\r\ncount = 0\r\nfor retail in companies:\r\n if retail.lower().endswith('retail'):\r\n count += 1\r\nprint(f\"\\n3. There are in total of {count} companies in the retail industry\")\r\n\r\n# PROBLEM 4 (4 Points)\r\nsmall_companies = [] # less than 500\r\nmedium_companies = [] # all the rest\r\nlarge_companies = [] # larger or equal to 5000\r\n\r\n\r\nfor company in companies:\r\n company = company.split(', ')\r\n size = int(company[2])\r\n if size < 500:\r\n small_companies.append(company[0])\r\n elif size >= 5000:\r\n large_companies.append(company[0])\r\n else:\r\n medium_companies.append(company[0])\r\n\r\nprint(small_companies)\r\nprint(medium_companies)\r\nprint(large_companies)\r\n# PROBLEM 5 (4 Points)\r\nlargest_company = ''\r\nnum = 0\r\nfor largest in companies:\r\n largest = largest.split (', ')\r\n if int(largest[-2]) > num:\r\n num = int(largest[-2])\r\n largest_company = largest[0]\r\nprint(largest_company)\r\n# END LAB EXERCISE\r\n","repo_name":"lindseyeis/SI-506","sub_path":"Lab Exercises/Lab Exercise 3/lab_exercise_03.py","file_name":"lab_exercise_03.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"41008453432","text":"import asyncio\nimport js\nimport os\nimport datetime as dt\nfrom js import Object\nfrom js import fetch\nfrom pyodide import to_js\n\nfrom pyodide.http import pyfetch\n\ndef guess_domain():\n \"\"\"Guess domain the JupyterLite environment is served from.\"\"\"\n location = '/'.join(':'.join(str(js.location).split(':')[1:]).split('/')[:-1])\n return location\n\n# Ish via https://til.simonwillison.net/python/sqlite-in-pyodide\nfrom pyodide import open_url\n\nDB_NAME = \"JupyterLite Storage\"\n\ndef remote_load(url):\n \"\"\"Import and run code from a remote URL.\"\"\"\n exec(open_url(url).read(), globals())\n \n# Via: https://github.com/jupyterlite/jupyterlite/discussions/91#discussioncomment-1137213\nasync def get_contents(path, raw=False):\n \"\"\"Load file from in-browser storage. Contents are in ['content'].\n \n Use the IndexedDB API to access JupyterLite's in-browser (for now) storage\n \n For documentation purposes, the full names of the JS API objects are used.\n \n See https://developer.mozilla.org/en-US/docs/Web/API/IDBRequest\n \"\"\"\n # we only ever expect one result, either an error _or_ success\n queue = asyncio.Queue(1)\n \n IDBOpenDBRequest = js.self.indexedDB.open(DB_NAME)\n IDBOpenDBRequest.onsuccess = IDBOpenDBRequest.onerror = queue.put_nowait\n \n await queue.get()\n \n if IDBOpenDBRequest.result is None:\n return None\n \n IDBTransaction = IDBOpenDBRequest.result.transaction(\"files\", \"readonly\")\n IDBObjectStore = IDBTransaction.objectStore(\"files\")\n IDBRequest = IDBObjectStore.get(path, \"key\")\n IDBRequest.onsuccess = IDBRequest.onerror = queue.put_nowait\n \n await queue.get()\n \n response = IDBRequest.result.to_py() if IDBRequest.result else None\n\n if raw:\n return response\n else:\n return response['content'] if response else None\n\nasync def load_file_into_in_mem_filesystem(url, fn=None):\n \"\"\"Load a file from a URL into an in-memory filesystem.\"\"\"\n \n # Create a filename if required\n fn = fn if fn is not None else url.split(\"/\")[-1]\n \n # Fetch file from URL\n res = await fetch(url)\n \n # Buffer it\n buffer = await res.arrayBuffer()\n \n # Write file to in-memory file system\n open(fn, \"wb\").write(bytes(buffer.valueOf().to_py()))\n \n return fn\n\nasync def get_stream_from_url(url):\n res = await pyfetch(url)\n stream = await res.bytes()\n return stream\n\n# There is also another possible implementation\nasync def load_file_into_in_mem_filesystem2(url, fn=None):\n # Create a filename if required\n fn = fn if fn is not None else url.split(\"/\")[-1]\n\n stream = await get_stream_from_url(url)\n\n # Write file to in-memory file system\n open(fn, \"wb\").write(stream)\n \n return fn\n\n\n# Call as:\n# url=\"https://raw.githubusercontent.com/psychemedia/lang-fairy-books/main/data.db\"\n# db_file = await load_file_into_in_mem_filesystem(url)\n\n# Demo:\n#import sqlite3\n# Open database connection\n#c = sqlite3.connect(db_file)\n\n# Show database tables\n#c.execute(\"SELECT name FROM sqlite_master WHERE type='table';\").fetchall()\n \n\n# via https://github.com/jupyterlite/jupyterlite/discussions/91#discussioncomment-2440964\nasync def put_contents(content, path, overwrite=False):\n \"\"\"\n \"\"\"\n # count existing\n queue = asyncio.Queue(1)\n \n IDBOpenDBRequest = js.self.indexedDB.open(DB_NAME)\n IDBOpenDBRequest.onsuccess = IDBOpenDBRequest.onerror = queue.put_nowait\n await queue.get()\n \n if IDBOpenDBRequest.result is None:\n return None\n \n IDBTransaction = 
IDBOpenDBRequest.result.transaction(\"files\", \"readonly\")\n IDBObjectStore = IDBTransaction.objectStore(\"files\")\n IDBRequest = IDBObjectStore.count(path)\n \n IDBRequest.onsuccess = IDBRequest.onerror = queue.put_nowait\n await queue.get()\n \n count = IDBRequest.result\n # print(f'count = {count}')\n \n if count == 1 and not overwrite:\n print(f'file {path} exists - will not overwrite')\n return \n \n # add file\n value = {\n 'name': os.path.basename(path), \n 'path': path,\n 'format': 'text',\n 'created': dt.datetime.now().isoformat(),\n 'last_modified': dt.datetime.now().isoformat(),\n 'content': content,\n 'mimetype': 'text/plain',\n 'type': 'file',\n 'writable': True,\n }\n #print(value)\n\n IDBTransaction = IDBOpenDBRequest.result.transaction(\"files\", \"readwrite\")\n IDBObjectStore = IDBTransaction.objectStore(\"files\")\n # see https://github.com/pyodide/pyodide/issues/1529#issuecomment-905819520\n value_as_js_obj = to_js(value, dict_converter=Object.fromEntries)\n if count == 0:\n IDBRequest = IDBObjectStore.add(value_as_js_obj, path)\n if count == 1:\n IDBRequest = IDBObjectStore.put(value_as_js_obj, path)\n IDBRequest.oncomplete = IDBRequest.onsuccess = IDBRequest.onerror = queue.put_nowait\n await queue.get()\n \n return IDBRequest.result","repo_name":"innovationOUtside/ouseful_jupyterlite_utils","sub_path":"ouseful_jupyterlite_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"27"}
+{"seq_id":"2287625141","text":"\"\"\" \"\"\"\nfrom setuptools import setup, find_packages\n\n__version__ = '0.0.4'\n\n# setup packages to be installed\npackages = find_packages(exclude=['*.tests',])\n\npackage_data = {}\n\n# include useful and/or necessary scripts\nscripts = ['bin/modtransform_simple',\n 'bin/modtransform_seq',]\n\n# python packages required for install\ninstall_requires = [\n \"argparse >= 1.3.0\",\n \"pysam >= 0.8.2.1\",\n]\n\nsetup_args = {\n \"name\" : \"modtransforms\",\n \"version\" : __version__,\n \"author\" : \"Karl Eklund\",\n \"author_email\" : \"keklund@ad.unc.edu\",\n \"description\" : \"Update various file formats using MOD File\",\n \"packages\" : packages,\n \"package_data\" : package_data,\n \"scripts\" : scripts,\n \"license\" : open(\"LICENSE\").read(),\n \"url\" : \"\",\n \"install_requires\" : install_requires,\n}\n\nsetup(**setup_args)\n","repo_name":"keeklund-zz/modtransforms","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"72570772233","text":"counties = {\n 'MBS': 'Mombasa',\n 'BGM': 'Bungoma',\n 'UG': 'Uasin Gishu',\n 'NBI': 'Nairobi',\n 'TRN': 'Trans-Nzoia',\n}\n\n\ncapitals = {\n 'UG': 'Eldoret',\n 'TRN': 'Kitale',\n 'NBI': 'Nairobi',\n}\n\ncounties['KSM'] = 'Kisumu'\ncapitals['KSM'] = None\ncapitals['MBS'] = 'Nyali'\ncapitals['BGM'] = 'Kimilili'\n\n\nprint('_' * 50)\nprint('The capital of ', counties['UG'], ' county is ', capitals['UG'])\nprint('_' * 50)\n\n\nfor abr, county in counties.items():\n print('%s is abreviated as %s' % (county, abr))\n\n\nprint('_' * 50)\nfor abr, capital in capitals.items():\n print('%s is the capital of %s' % (capital, abr))\n\n\nprint('_' * 50)\nfor abr, county in counties.items():\n print('The capital of %s(%s) is %s' % (county, abr, capitals[abr]))\n\n\nprint('_' * 50)\nwhere = input('Which county abbreviation? : ')\nhere = counties.get(where)\nprint('That is %s county ' % here)\n","repo_name":"deisaack/pure","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"41078519785","text":"from email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP\n\n\ndef main():\n # Create the plain-text body\n text_content = MIMEText('python 邮件测试', 'plain', 'utf-8')\n # Create a multipart message object that can carry attachments\n message = MIMEMultipart()\n message['Subject'] = Header(\"测试带附件的邮件\")\n message['From'] = Header('你爹', 'utf-8')\n message['To'] = Header('有缘人', 'utf-8')\n # Attach the text body to the message\n message.attach(text_content)\n\n # Read a file and add it to the message as an attachment\n with open('img/post.jpg', 'rb') as f:\n img = MIMEText(f.read(), 'base64', 'utf-8')\n img['Content-Type'] = 'image/jpeg'\n img['Content-Disposition'] = 'attachment; filename=post.png'\n message.attach(img)\n\n # Create the SMTP client\n smtper = SMTP('smtp.qq.com')\n # Start a secure (TLS) connection\n smtper.starttls()\n sender = 'xx.qq.com'\n key = 'xxxxx'\n receivers = [\n 'xx@qq.com',\n 'xx@qq.com',\n 'xx@qq.com'\n ]\n smtper.login(sender, key)\n smtper.sendmail(sender, receivers, message.as_string())\n smtper.quit()\n print(\"发送成功!\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yrzs/pythonStudy0-100","sub_path":"day01-15/day14/demo7.py","file_name":"demo7.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36967199284","text":"# Create a network from a custom configuration object.\nfrom bsb.config import Configuration\nfrom bsb.core import Scaffold\n\ncfg = Configuration()\n# Let's set a file name for the network\ncfg.storage.root = \"my_network.hdf5\"\n# And add a cell type\ncfg.cell_types.add(\n \"hero_cells\",\n spatial=dict(\n radius=2,\n density=1e-3,\n ),\n)\n\n# After customizing your configuration, create a network from it.\nnetwork = Scaffold(cfg)\nnetwork.compile()\n","repo_name":"dbbs-lab/bsb-core","sub_path":"examples/networks/create_from_cfg.py","file_name":"create_from_cfg.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"27"}
+{"seq_id":"19754510580","text":"#This program is to check an individual corrupt frame\n\nimport cv2 as cv\nfrom sys import argv\n\ncount = 0\n\nvid = argv[1]\n\nvidCap = cv.VideoCapture(vid)\n\nfor count in range(30860) :\n success,image = vidCap.read()\n count += 1\n if count == 300 or count == 1200 or count == 2000 or count == 10000 or count == 25000 :\n\n print(count)\n\nsuccess,image = vidCap.read()\n\nheight, width, channels = image.shape\nprint (height, width, channels)\n","repo_name":"shawsuraj/movie-barcode","sub_path":"tests/frame_check.py","file_name":"frame_check.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"70213555591","text":"import numpy as np\n\n\nclass Circle:\n def __init__(self, x=0, y=0, rad=0):\n \"\"\" Create a new point at the origin \"\"\"\n self.x = x\n self.y = y\n self.radius = rad\n\n\ndef metric_VRIc(circle_det_list, circle_gt_list):\n beta = 0.5\n if not circle_det_list:\n return 0\n VRIc = beta*Cd(circle_det_list, circle_gt_list) + (1-beta)*Cf(circle_det_list, circle_gt_list)\n return VRIc\n\n\ndef Cd(circle_det_list, circle_gt_list):\n cd = 0\n N_gt = len(circle_gt_list)\n for circle_gt in circle_gt_list:\n ov_list = []\n for circle_det in circle_det_list:\n ov = Ov(circle_det, circle_gt)\n ov_list.append(ov)\n if max(ov_list) >= 0.5:\n cd += max(ov_list)/N_gt\n return cd\n\n\ndef Cf(circle_det_list, circle_gt_list):\n cf = 1\n N_d = len(circle_det_list)\n for circle_det in circle_det_list:\n ov_list2 = []\n for circle_gt in circle_gt_list:\n ov = Ov(circle_det, circle_gt)\n ov_list2.append(ov)\n if max(ov_list2) < 0.5:\n cf -= max(ov_list2)/N_d\n return cf\n\n\ndef Ov(circle1, circle2):\n\n radius1 = circle1.radius\n radius2 = circle2.radius\n\n if radius1 >= radius2:\n max_circle = np.pi * radius1**2\n else:\n max_circle = np.pi * radius2**2\n\n center_dist = distance(circle1, circle2)\n\n if center_dist >= radius1 + radius2:\n return 0\n elif radius1 + center_dist <= radius2:\n return np.pi * radius1 ** 2 / max_circle\n elif radius2 + center_dist <= radius1:\n return np.pi * radius2 ** 2 / max_circle\n else:\n ang1 = 2 * np.arccos((radius2**2 + center_dist**2 - radius1**2)/(2*radius2*center_dist)) # arccos range: 0-pi\n ang2 = 2 * np.arccos((radius1**2 + center_dist**2 - radius2**2)/(2*radius1*center_dist))\n return (arc_area(radius2, ang1) + arc_area(radius1, ang2))/max_circle\n\n\ndef distance(c1, c2):\n dist_x = c1.x - c2.x\n dist_y = c1.y - c2.y\n return (dist_y**2 + dist_x**2)**0.5\n\n\ndef arc_area(radius, angle):\n\n arc = radius ** 2 * angle / 2\n if np.pi*2 >= angle > np.pi:\n arc_total = arc + radius **2 * np.sin(np.pi*2 - angle)\n elif np.pi >= angle >= 0:\n arc_total = arc - radius **2 * np.sin(angle)\n else:\n my_error = ValueError(str(angle) + \"is not a valid angle\")\n raise my_error\n return arc_total\n\n","repo_name":"liboyan233/Hole_circle_detection","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"1453329607","text":"import cv2\nimport os\nimport sys\n\n\nOUTPUT_DIR_NAME = \"output\"\n# 出力するファイルのフォーマットを指定する定数\nOUTPUT_EXTENTION = \"png\"\n\n# openCV imwriteの仕様により指定できるフォーマットは限定されています\n# 詳しくはopenCV imwriteのドキュメントをご確認ください\n# URL: https://docs.opencv.org/3.4/d4/da8/group__imgcodecs.html#gabbc7ef1aa2edfaa87772f1202d67e0ce\n\n\ndef normalize_path(path):\n \"\"\"パスを正規化する\n Unix系のOSでは\"/\"を使うがWindowsでは\"\\\\\"を使うため,\n これを\"/\"に正規化する\n \"\"\"\n return os.path.normpath(path.replace('/', '\\\\'))\n\n\ndef is_absolute_path(path):\n \"\"\"パスが絶対パスか判定\"\"\"\n return os.path.isabs(path)\n\n\ndef get_inputfile_abs_path(path):\n \"\"\"入力ファイルのパスの絶対パスを取得\n Returns (str): 入力ファイルの絶対パスを返す\n \"\"\"\n\n # 入力が絶対パスかどうか判定\n if not is_absolute_path(path):\n absolute_path = os.path.abspath(path)\n else:\n absolute_path = path\n\n if not os.path.exists(absolute_path):\n raise FileNotFoundError(f\"{absolute_path} が見つかりません\")\n\n return absolute_path\n\n\ndef get_output_dir_path():\n \"\"\"OUTPUT_DIR_NAMEディレクトリの絶対パス\n Return (str): このスクリプトと同階層にあるOUTPUT_DIR_NAMEディレクトリの絶対パスを返します\n \"\"\"\n # このスクリプトの実行されている絶対パスのディレクトリのパスを返す\n script_abs_dir_path = os.path.dirname(__file__)\n output_dir_path = os.path.join(script_abs_dir_path, OUTPUT_DIR_NAME)\n\n # ouputディレクトリが存在しない場合\n if not os.path.exists(output_dir_path):\n os.makedirs(output_dir_path)\n print(f\"{output_dir_path}ディレクトリを作成しました\")\n return output_dir_path\n\n return output_dir_path\n\n\ndef get_file_basename_without_extention(file_path):\n \"\"\"ファイルのパスから拡張子なしのファイル名を取得\n Args:\n file_path (str): ファイルの絶対パスまたは相対パス\n Returns (str) : ファイルパスから拡張子なしのファイル名を取得します\n \"\"\"\n basename = os.path.basename(file_path)\n file_name_without_extention = os.path.splitext(basename)[0]\n return file_name_without_extention\n\n\ndef get_output_file_path(input_file_path, lower_limit, upper_limit):\n \"\"\"出力ファイルのファイルパスを取得\n Args:\n input_file_path (str): 入力するファイルのファイルパス\n lower_limit (int): 閾値の下限を与える\n upper_limit (int): 閾値の上限を与える\n\n Return (str): 出力先のファイルの絶対パス\n \"\"\"\n\n output_dir_path = get_output_dir_path()\n basename = get_file_basename_without_extention(input_file_path)\n\n # 出力ファイルのファイル名のフォーマット\n # 元のファイル名_閾値下限_閾値上限.出力フォーマットの拡張子\n output_file_name = f\"{basename}_{lower_limit}_{upper_limit}.{OUTPUT_EXTENTION}\"\n\n output_file_path = os.path.join(output_dir_path, output_file_name)\n return output_file_path\n\n\ndef decorator_print_arguments_and_result(original_function):\n \"\"\"引数と結果を描画するデコレータ\"\"\"\n def wrapper_function(*args, **kwargs):\n # 引数の表示\n print(\"=\" * 60)\n print(f\"引数: {args}, {kwargs}\")\n # 関数の実行\n result = original_function(*args, **kwargs)\n # 結果の表示\n print(f\"結果: {result}\")\n print(\"=\" * 60)\n\n return result\n return wrapper_function\n\n\n@decorator_print_arguments_and_result\ndef get_line_extraction(input_file_path, lower_limit, upper_limit):\n \"\"\"画像から線画抽出して画像ファイルに出力\n Args:\n 注意:\n 閾値の下限・上限はある程度実験的に選ぶ必要がある\n 一般的には下限を高めに設定して高い閾値は低い閾値の2~3倍にすることが推奨されている\n 最適な値は試行錯誤によって見つける必要があります\n\n lower_limit (int): 閾値の下限を与える\n upper_limit (int): 閾値の上限を与える\n\n Return (str): 出力先のファイルの絶対パス\n \"\"\"\n # 画像をグレースケールで読み込む\n image = cv2.imread(input_file_path, 0)\n\n # エッジを検出\n edges = cv2.Canny(image, lower_limit, upper_limit)\n\n # 出力ファイル名を取得\n outptu_file_path = get_output_file_path(\n input_file_path, lower_limit, upper_limit)\n\n cv2.imwrite(outptu_file_path, edges)\n\n return outptu_file_path\n\n\n# バリデーション時のエラーを定義\nclass ValidationError(Exception):\n def __init__(self, message):\n self.message = message\n super().__init__(self.message)\n\n\ndef 
is_integer(value):\n \"\"\"整数かどうか判定\"\"\"\n if isinstance(value, int):\n return True\n else:\n return False\n\n\ndef validation_check(input_file_path, lower_limit, upper_limit):\n \"\"\"入力時のバリデーションチェック\"\"\"\n\n # 入力引数の絶対パスを取得\n inputfile_abs_path = get_inputfile_abs_path(\n normalize_path(input_file_path))\n\n # ファイルが存在するか確認\n if not os.path.exists(inputfile_abs_path):\n raise ValidationError(f\"{inputfile_abs_path}が見つかりません\")\n\n # 閾値が整数か確認\n if not (is_integer(lower_limit) and is_integer(upper_limit)):\n raise ValidationError(\"閾値は整数で入力してください\")\n\n # 閾値の範囲が適切か確認\n if not (lower_limit > 0 and upper_limit > 0):\n raise ValidationError(\"閾値は自然数で入力してください\")\n if not (lower_limit <= upper_limit):\n raise ValidationError(\"閾���は下限,上限の順に入力してください\")\n\n return True\n\n\ndef main():\n args = sys.argv\n if len(args) != 4:\n raise ValidationError(\"コマンドライン引数が無効です\")\n\n # 引数の受け取り\n input_file_path = args[1]\n lower_limit = int(args[2])\n upper_limit = int(args[3])\n\n # バリデーションチェック\n if validation_check(input_file_path, lower_limit, upper_limit):\n get_line_extraction(input_file_path, lower_limit, upper_limit)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kkml4220/line-extraction","sub_path":"line_extraction.py","file_name":"line_extraction.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"3307492908","text":"import multiprocessing\nfrom Queue import Queue\nimport os\nimport sys\nfrom bluetooth import *\nimport threading\nimport time\nimport struct\nimport logging\nimport json\n\n\n\n\n\ndef removeFilesInFolder(folderpath):\n if os.path.exists(folderpath):\n for filepath in os.listdir(folderpath):\n os.remove(os.path.join(folderpath, filepath))\n\n\nclass BTHandler(multiprocessing.Process):\n\n \n m = multiprocessing.Manager()\n queueHandler = m.Queue()\n\n \n deviceList = [\n \"68:B3:5E:58:96:E8\", # Android Tablet\n \"60:AB:67:91:09:2C\", # DianWei's Phone\n ]\n\n resultFolder = \"/home/pi/checklist-results\"\n imageFolder = \"/home/pi/checklist-images\"\n proResults = []\n\n def __init__(\n self, port, queueJob, header, fpReceived, fpNow\n ):\n multiprocessing.Process.__init__(self)\n\n self.fpReceived = fpReceived\n self.fpNow = fpNow\n self.header = header\n self.queueJob = queueJob\n self.daemon = True\n self.port = port\n self.logger = logging.getLogger(__name__)\n self.c = None\n \n self.start()\n\n def recv(self, c):\n while True:\n try:\n data = c.recv(1024)\n \n\n if sys.version_info[0] == 3 and type(data) == bytes: \n data = data.decode(\"utf8\")\n\n if len(data) > 0:\n packet = data\n self.logger.info(\"[AND] \" + str(packet))\n if \"reset\" in str(packet):\n removeFilesInFolder(self.resultFolder)\n removeFilesInFolder(self.imageFolder)\n self.proResults = []\n self.fpReceived = True\n self.fpNow = False\n \n elif \"finish_explore\" in str(packet):\n self.logger.info(\"finish_explore detected\")\n self.queueJob.put(self.header + \":D:finish_explore\") \n \n elif \"cmd:explore\" in str(packet):\n self.logger.info(\"cmd:explore detected\")\n self.queueJob.put(self.header + \":D:create_run\")\n \n \n \n \n self.queueJob.put(self.header + \":\" + str(packet))\n\n except BluetoothError as e:\n self.logger.error(\"[error][AND] disconnected\")\n self.logger.error(e)\n\n break\n time.sleep(0.2)\n self.c.close()\n self.c = None\n\n def send(self, c, message):\n if self.c == None:\n self.logger.error(\"[error][AND] no device connected\")\n else:\n\n self.logger.info(\"[sending][AND]: \" + message)\n self.c.send(str(message + \"\\n\"))\n \n\n def handleProcessor(self):\n while True:\n if self.queueHandler.qsize() != 0:\n packet = self.queueHandler.get()\n self.queueHandler.task_done()\n\n self.send(self.c, packet)\n\n time.sleep(0.2)\n\n def handle(self, packet):\n self.queueHandler.put(packet)\n \n def closeConnect(self, c):\n self.c.close()\n\n def getPacketHeader(self):\n return self.header \n\n def run(self):\n t2 = threading.Thread(target=self.handleProcessor, args=())\n t2.start()\n\n socketServer = BluetoothSocket(RFCOMM)\n socketServer.bind((\"\", self.port))\n socketServer.listen(1)\n\n while True:\n self.logger.info(\"[LOG][AND] waiting for connection from device\")\n self.c, address = socketServer.accept()\n if address[0] in self.deviceList:\n\n self.logger.info(\"[LOG][AND] Connection from: \" + str(address))\n\n t = threading.Thread(target=self.recv, args=(self.c,))\n t.start()\n t.join()\n else:\n self.logger.error(\n \"[error][AND] unlisted device tried to connect. mac address: \" + address\n )\n self.c.close()\n\n self.c.close()\n socketServer.close()\n t2.join()\n\n\n","repo_name":"anushadatta/MDP","sub_path":"RPi/BTHandler.py","file_name":"BTHandler.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"9840889402","text":"\"\"\"The tests for the panel_custom component.\"\"\"\nimport os\nimport shutil\nimport unittest\nfrom unittest.mock import Mock, patch\n\nfrom homeassistant import bootstrap\nfrom homeassistant.components import panel_custom\n\nfrom tests.common import get_test_home_assistant\n\n\n@patch('homeassistant.components.frontend.setup',\n autospec=True, return_value=True)\nclass TestPanelCustom(unittest.TestCase):\n \"\"\"Test the panel_custom component.\"\"\"\n\n def setup_method(self, method):\n \"\"\"Setup things to be run when tests are started.\"\"\"\n self.hass = get_test_home_assistant()\n\n def teardown_method(self, method):\n \"\"\"Stop everything that was started.\"\"\"\n self.hass.stop()\n shutil.rmtree(self.hass.config.path(panel_custom.PANEL_DIR),\n ignore_errors=True)\n\n @patch('homeassistant.components.panel_custom.register_panel')\n def test_webcomponent_in_panels_dir(self, mock_register, _mock_setup):\n \"\"\"Test if a web component is found in config panels dir.\"\"\"\n config = {\n 'panel_custom': {\n 'name': 'todomvc',\n }\n }\n\n assert not bootstrap.setup_component(self.hass, 'panel_custom', config)\n assert not mock_register.called\n\n path = self.hass.config.path(panel_custom.PANEL_DIR)\n os.mkdir(path)\n\n with open(os.path.join(path, 'todomvc.html'), 'a'):\n assert bootstrap.setup_component(self.hass, 'panel_custom', config)\n assert mock_register.called\n\n @patch('homeassistant.components.panel_custom.register_panel')\n def test_webcomponent_custom_path(self, mock_register, _mock_setup):\n \"\"\"Test if a web component is found in config panels dir.\"\"\"\n filename = 'mock.file'\n\n config = {\n 'panel_custom': {\n 'name': 'todomvc',\n 'webcomponent_path': filename,\n 'sidebar_title': 'Sidebar Title',\n 'sidebar_icon': 'mdi:iconicon',\n 'url_path': 'nice_url',\n 'config': 5,\n }\n }\n\n with patch('os.path.isfile', Mock(return_value=False)):\n assert not bootstrap.setup_component(\n self.hass, 'panel_custom', config\n )\n assert not mock_register.called\n\n with patch('os.path.isfile', Mock(return_value=True)):\n with patch('os.access', Mock(return_value=True)):\n assert bootstrap.setup_component(\n self.hass, 'panel_custom', config\n )\n\n assert mock_register.called\n\n args = mock_register.mock_calls[0][1]\n assert args == (self.hass, 'todomvc', filename)\n\n kwargs = mock_register.mock_calls[0][2]\n assert kwargs == {\n 'config': 5,\n 'url_path': 'nice_url',\n 'sidebar_icon': 'mdi:iconicon',\n 'sidebar_title': 'Sidebar Title'\n }\n","repo_name":"NAStools/homeassistant","sub_path":"tests/components/test_panel_custom.py","file_name":"test_panel_custom.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"27"}
+{"seq_id":"25437705859","text":"print(\"Convert to binary (and base X)\")\n\nimport sys\n\ndef toBinary():\n num = input(\"Enter the base 10 number: \")\n if str.isdigit(num):\n num_bin = bin(int(num))\n binary = \"\"\n for i in range((len(str(num_bin))-2)):\n binary += str(num_bin)[i+2]\n print(binary)\n else:\n sys.exit()\n\ndef toBaseX(base):\n num = input(\"Enter the base 10 number (max \" + str(((base**5)-1)) + \"): \")\n if str.isdigit(num):\n num = int(num)\n if num < base ** 5:\n\n num_list = [0, 0, 0, 0, 0]\n \n while num >= base ** 4:\n num -= base**4\n num_list[0] += 1\n \n while num >= base ** 3:\n num -= base**3\n num_list[1] += 1\n \n while num >= base ** 2:\n num -= base**2\n num_list[2] += 1\n \n while num >= base ** 1:\n num -= base**1\n num_list[3] += 1\n\n while num >= base ** 0:\n num -= base**0\n num_list[4] += 1\n\n total = \"\"\n for i in num_list:\n total += str(i)\n\n print(total)\n \n else:\n sys.exit()\n else:\n sys.exit()\n\nbase = input(\"Enter base: \")\n\nif base.isdigit() == False:\n sys.exit()\nelif int(base) == 2:\n toBinary()\nelse:\n toBaseX(int(base))","repo_name":"nayakrujul/python-scripts","sub_path":"Others/Base X Converter 2.py","file_name":"Base X Converter 2.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"15551888524","text":"import time\nimport heapq\nfrom os import path\nfrom bintrees import RBTree\n\n\ndef read_array(filename):\n result = list()\n\n with open(path.join('.', filename), 'r') as f:\n for row in f.readlines():\n result.append(int(row.strip(\"\\n\")))\n\n assert len(result) == 10000\n assert min(result) == 1\n assert max(result) == 10000\n\n return result\n\n\ndef median_maintenance_heaps(arr):\n sum_median = arr[0]\n heap_low = []; heap_high = []\n\n if arr[0] > arr[1]:\n heapq.heappush(heap_low, -arr[1])\n heapq.heappush(heap_high, arr[0])\n sum_median += arr[1]\n else:\n heapq.heappush(heap_low, -arr[0])\n heapq.heappush(heap_high, arr[1])\n sum_median += arr[0]\n\n i = 3\n\n for cur_num in arr[2:]:\n max_heap_low = -heap_low[0]\n min_heap_high = heap_high[0]\n # import pdb;pdb.set_trace()\n if i % 2 == 1:\n if max_heap_low <= cur_num <= min_heap_high:\n sum_median += cur_num\n # print(cur_num)\n heapq.heappush(heap_low, -cur_num)\n elif cur_num < max_heap_low:\n heapq.heappush(heap_low, -cur_num)\n # print(-heap_low[0])\n sum_median += -heap_low[0]\n else:\n heapq.heappush(heap_high, cur_num)\n # print(heap_high[0])\n sum_median += heap_high[0]\n else:\n if cur_num < max_heap_low:\n heapq.heappush(heap_low, -cur_num)\n else:\n heapq.heappush(heap_high, cur_num)\n\n if len(heap_low) > len(heap_high):\n heapq.heappush(heap_high, - heapq.heappop(heap_low))\n elif len(heap_low) < len(heap_high):\n heapq.heappush(heap_low, - heapq.heappop(heap_high))\n\n # print(-heapq.heappop(heap_low))\n sum_median += -heap_low[0]\n i += 1\n\n assert len(heap_high) + len(heap_low) == len(arr)\n return sum_median\n\n\ndef post_order_traverse(cur):\n \"\"\"\n O(n) to calculate the size of tree rooted by each node. Alternative optimization is to move it to the insert\n method. 
To save time, I did not override the insert method of the bintrees module.\n \"\"\"\n if cur is not None:\n if cur.left is None and cur.right is None:\n cur.value = 1\n return\n\n left_size = 0\n if cur.left is not None:\n post_order_traverse(cur.left)\n left_size = cur.left.value\n\n right_size = 0\n if cur.right is not None:\n post_order_traverse(cur.right)\n right_size = cur.right.value\n\n cur.value = left_size + right_size + 1\n\n\n# O(logn)\ndef select_order_statistic(subtree_root, i):\n # print(subtree_root.val)\n # assert subtree_root.value >= i\n\n if subtree_root is not None:\n if subtree_root.left is None:\n if i == 1:\n return subtree_root.key\n else:\n return select_order_statistic(subtree_root.right, i - 1)\n\n if subtree_root.right is None:\n if i == (subtree_root.left.value + 1):\n return subtree_root.key\n else:\n return select_order_statistic(subtree_root.left, i)\n\n left_tree_size = subtree_root.left.value\n if i == (left_tree_size + 1):\n return subtree_root.key\n elif i <= left_tree_size:\n return select_order_statistic(subtree_root.left, i)\n else:\n return select_order_statistic(subtree_root.right, i - left_tree_size - 1)\n\n\ndef median_maintenance_rbtree(arr):\n median_sum = arr[0]\n rbtree = RBTree([(arr[0], 1)]) # (number, subtree_size)\n\n for i in range(1, len(arr)):\n ele = arr[i]\n rbtree.insert(ele, 1)\n post_order_traverse(rbtree._root)\n # assert rbtree._root.value == (i+1)\n median_order = int(i / 2) + 1\n median_sum += select_order_statistic(rbtree._root, median_order)\n\n return median_sum\n\n\nif __name__ == \"__main__\":\n input_arr = read_array(\"median_maintenance.txt\")\n\n time_start = time.time()\n print(median_maintenance_heaps(input_arr) % 10000)\n print(time.time() - time_start)\n\n time_start = time.time()\n # input_arr = [1,2,3,4]\n print(median_maintenance_rbtree(input_arr) % 10000)\n print(time.time() - time_start)\n","repo_name":"liaoaoyuan97/stanford_algorithms_specialization","sub_path":"graph_search/week3/median_maintainance.py","file_name":"median_maintainance.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"21793969047","text":"\n# fair array leetcode weekly challenge solution\n\ndef clac_odd_even (arr : list) : \n odd = 0;even = 0\n\n for i in range(len(arr)) : \n if i%2 == 0 : \n even += arr[i]\n else : \n odd += arr[i]\n\n return (odd, even)\n\n\ndef fair_array(arr : list) -> int : \n count = 0\n\n for i in range(len(arr)) : \n odd, even = clac_odd_even(arr)\n \n if i%2 == 0 : \n if even - arr[i] == odd : \n count+=1\n\n else : \n if even == odd-arr[i] : \n count+=1\n\n return count\n\n\nif __name__ == \"__main__\":\n print(fair_array([6,1,7,4,1]))","repo_name":"AnkitAvi11/Data-Structures-And-Algorithms","sub_path":"Arrays/fairarray.py","file_name":"fairarray.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"}
+{"seq_id":"12758124468","text":"class Solution(object):\n def threeSumClosest(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n nums.sort()\n currsum = sum(nums[0:3])\n result = currsum\n diff = abs(target - currsum)\n for i in range(len(nums)-2):\n for j in range(i+1, len(nums)-1):\n for k in range(i+2, len(nums)):\n currsum = nums[i] + nums[j] + nums[k]\n if currsum - target == 0:\n return currsum\n elif diff > abs(currsum-target):\n diff = abs(currsum-target)\n result = currsum\n return result\n","repo_name":"nobeltws/leetcode","sub_path":"python/3sums-closest.py","file_name":"3sums-closest.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"21374342416","text":"import socket\r\nimport selectors\r\n# selectors is a high-level multiplexing tool\r\n\r\nselector = selectors.DefaultSelector()\r\n\r\n\r\ndef server(): \r\n server_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server_soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n server_soc.bind(('localhost', 5001))\r\n server_soc.listen()\r\n\r\n selector.register(fileobj=server_soc, events=selectors.EVENT_READ, data=accept_con)\r\n\r\n\r\n\r\ndef accept_con(server_soc):\r\n client_soc, addr = server_soc.accept()\r\n print(addr, ' connected')\r\n\r\n selector.register(fileobj=client_soc, events=selectors.EVENT_READ, data=send_message)\r\n\r\ndef send_message(client_soc):\r\n\r\n request = client_soc.recv(4096)\r\n if request:\r\n response = \"Hello\".encode()\r\n client_soc.send(response)\r\n else:\r\n selector.unregister(client_soc)\r\n client_soc.close()\r\n\r\n\r\ndef event_loop():\r\n while True:\r\n \r\n events = selector.select() # (key --> object of SelectorKey, events --> just bit mask)\r\n for key, _ in events:\r\n callback = key.data\r\n callback(key.fileobj)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n server()\r\n event_loop()","repo_name":"ignatpenshin/async_python","sub_path":"3_callback.py","file_name":"3_callback.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"1221508199","text":"import requests\nfrom pprint import pprint\n\n\ndef credits(title):\n pass \n # 여기에 코드를 작성합니다. \n SEARCH_URL = f'https://api.themoviedb.org/3/search/movie?api_key=2493cbf091c3129450beca0dc74b2470&language=ko-KR&query={title}&page=1&include_adult=true'\n response_search = requests.get(SEARCH_URL).json()\n movie_id = ''\n\n for i in range(0, len(response_search.get('results'))):\n if response_search.get('results')[i].get('title') == title:\n movie_id = response_search.get('results')[i].get('id')\n \n BASE_URL = 'https://api.themoviedb.org/3'\n path = f'/movie/{movie_id}/credits'\n params = {\n 'api_key' : '2493cbf091c3129450beca0dc74b2470',\n 'language' : 'ko-KR'\n }\n\n response = requests.get(BASE_URL + path, params = params).json()\n \n credits_list = {}\n cast_list = []\n crew_list = []\n\n if (movie_id == ''):\n return None\n else:\n for i in range(0,len(response.get('cast'))):\n if response.get('cast')[i].get('cast_id') < 10:\n cast_list.append(response.get('cast')[i].get('name'))\n for i in range(0,len(response.get('crew'))):\n if response.get('crew')[i].get('department') == 'Directing':\n crew_list.append(response.get('crew')[i].get('name'))\n \n credits_list['cast'] = cast_list\n credits_list['crew'] = crew_list\n\n return credits_list\n\n# 아래의 코드는 수정하지 않습니다.\nif __name__ == '__main__':\n \"\"\"\n 제목에 해당하는 영��가 있으면 해당 영화 id를 통해 영화 상세정보를 검색하여 주연배우 목록(cast)과 스태프(crew) 중 연출진 목록을 반환\n 영화 id 검색에 실패할 경우 None을 반환\n \"\"\"\n pprint(credits('기생충'))\n # {'cast': ['Song Kang-ho', 'Lee Sun-kyun', ..., 'Jang Hye-jin'], 'crew': ['Bong Joon-ho', 'Park Hyun-cheol', ..., 'Yoon Young-woo']}\n pprint(credits('검색할 수 없는 영화'))\n # None\n","repo_name":"wnsn8546/TIL","sub_path":"KDT-MultCampus/0722_PJT-2_API활용_스크래핑/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42576707296","text":"import requests\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nimport config\nimport deployment\nimport alert\nfrom requests import Session\nfrom requests.adapters import HTTPAdapter\n\nclass Client(object):\n \"\"\"\n BigPanda Client object, used to send alerts and deployments.\n \"\"\" \n def __init__(self, api_token, app_key=None, base_url=config.base_url, timeout=10, max_retries=5, suppress_app_key=False):\n \"\"\"\n Create a new Client object, used to send alerts and deployments.\n\n api_token: Your organization's API token\n app_key: Application key, required for sending alerts.\n \"\"\"\n self.api_token = api_token\n self.app_key = app_key\n self.base_url = base_url\n self.time_out = timeout\n self.max_retries = max_retries\n self.suppress_app_key = suppress_app_key\n self.session = Session()\n\n def deployment(self, component, version, hosts, status='start', owner=None, env=None):\n \"\"\"\n Return a new Deployment object associated with this client.\n \n Refer to bigpanda.Deployment for more help.\n \"\"\"\n return deployment.Deployment(component, version, hosts, status, owner, env, client=self)\n\n def alert(self, status, subject, check=None, description=None, cluster=None, timestamp=None, primary_attr='host', secondary_attr='check', **kwargs):\n \"\"\"\n Return a new Alert object associated with this client.\n \n Refer to bigpanda.Alert for more help.\n \"\"\"\n return alert.Alert(status, subject, check, description, cluster, timestamp, primary_attr, secondary_attr, client=self, **kwargs)\n\n def send(self, data):\n \"\"\"\n Send an alert or deployment object.\n \n Normally equivalent to calling .send() on the object itself, but accepts a list\n of alerts/deployment to send in a single api call.\n \"\"\"\n\n data_type = self._get_data_type(data)\n\n if isinstance(data, list):\n if data_type != 'alert':\n raise TypeError('Batch mode is only supported for alerts.')\n\n messages = list()\n for m in data:\n messages.append(m._build_payload())\n payload = dict(alerts=messages)\n endpoint = data[0]._endpoint\n else:\n payload = data._build_payload()\n endpoint = data._endpoint\n\n # Deployments don't have app_key just yet\n if data_type == 'alert':\n if not self.app_key and not self.suppress_app_key:\n raise RuntimeError(\"app_key is not set\")\n payload['app_key'] = self.app_key\n\n self.post_payload = payload\n\n self._api_call(endpoint, payload)\n\n def _api_call(self, endpoint, data=None):\n\n if self.session is None:\n self.session = Session()\n\n headers = {'Authorization': 'Bearer %s' % self.api_token,\n 'Content-Type': 'application/json'}\n\n s = requests.Session()\n s.mount(self.base_url + endpoint, HTTPAdapter(max_retries=self.max_retries))\n\n if data:\n self.data = data\n r = s.post(self.base_url + endpoint, data=json.dumps(data), headers=headers, timeout=self.time_out)\n else:\n r = s.get(self.base_url + endpoint, headers=headers, timeout=self.time_out)\n\n r.raise_for_status()\n\n def _get_data_type(self, data):\n if isinstance(data, list):\n return type(data[0]).__name__.lower()\n else:\n return type(data).__name__.lower()\n","repo_name":"bigpandaio/bigpanda-python-module","sub_path":"bigpanda/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"30459048260","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.service import Service as FirefoxService\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom selenium.webdriver.common.by import By\nfrom math import sqrt\nfrom sumplete import Sumplete\n\nservice = FirefoxService(executable_path=GeckoDriverManager().install())\n\ndriver = webdriver.Firefox(service=service)\n\ndriver.get(\"https://sumplete.com/master/\")\n\nnumbers = driver.find_elements(By.CLASS_NAME, \"number\")\nn = int(sqrt(len(numbers))) # rows / columns\n\nmatrix = []\nbuttons = {}\nfor i, number in enumerate(numbers):\n if i % n == 0:\n matrix.append([]) # new row\n matrix[-1].append(int(number.text))\n buttons[(len(matrix)-1, len(matrix[-1]) - 1)] = number\n\n# we store horizontal answers\nhanswers = driver.find_elements(By.CLASS_NAME, \"hanswer\")\nrows_answers = []\nfor hanswer in hanswers:\n rows_answers.append(int(hanswer.text))\n\n# we store vertical answers\nvanswers = driver.find_elements(By.CLASS_NAME, \"vanswer\")\ncols_answers = []\nfor vanswer in vanswers:\n cols_answers.append(int(vanswer.text))\n\nsumplete = Sumplete(matrix, rows_answers, cols_answers)\nsumplete.solve()\n\n# nice stuff\nif sumplete.solved:\n for i in range(n):\n for j in range(n):\n if sumplete.answer[i][j]:\n buttons[(i, j)].click()\n buttons[(i, j)].click()\n else:\n buttons[(i, j)].click()","repo_name":"artbrare/sumplete-solver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"19474107667","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import spearmanr\nfrom scipy.cluster import hierarchy\nfrom collections import defaultdict\nfrom gahaco.visualization.visualize import rename_features\n\ndef select_uncorrelated_features(features_df, labels,\n gini_impurities=None,\n method='average', distance_cutoff=0.3,\n experiment=None):\n '''\n Clusters the Spearman rank-roder correlation of the different features, we keep only a single feature per cluster, \n if gini impurities are given keeps the one that was most important for the classificiation at hand.\n\n Args:\n features_df: pandas data frame of features\n Returns:\n reduced dataframe with only low correlated features\n '''\n corr = np.round(spearmanr(features_df).correlation, 4)\n corr_condensed = hierarchy.distance.squareform(1-abs(corr))\n corr_linkage = hierarchy.linkage(corr_condensed, method=method)\n cluster_ids = hierarchy.fcluster(corr_linkage, distance_cutoff, criterion='distance')\n cluster_id_to_feature_ids = defaultdict(list)\n for idx, cluster_id in enumerate(cluster_ids):\n cluster_id_to_feature_ids[cluster_id].append(idx)\n\n if gini_impurities is None:\n corr_labels = []\n for feature in features_df.columns:\n corr_ = spearmanr(features_df[feature], labels).correlation\n corr_labels.append(corr_)\n corr_labels = np.asarray(corr_labels)\n selected_features = [v[np.argmax(abs(corr_labels[v]))] for v in cluster_id_to_feature_ids.values()]\n else:\n selected_features = [v[np.argmax(gini_impurities[v])] for v in cluster_id_to_feature_ids.values()]\n\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))\n dendro = hierarchy.dendrogram(corr_linkage, \n labels=rename_features(features_df.columns), ax=ax1,\n leaf_rotation=90)\n ax1.axhline(y=distance_cutoff, color='black', linestyle='dashed')\n dendro_idx = np.arange(0, len(dendro['ivl']))\n\n\n im = ax2.imshow(corr[dendro['leaves'], :][:, dendro['leaves']])\n fig.colorbar(im, orientation='horizontal', pad = 0.25)\n ax2.set_xticks(dendro_idx)\n ax2.set_yticks(dendro_idx)\n ax2.set_xticklabels(dendro['ivl'], rotation='vertical')\n ax2.set_yticklabels(dendro['ivl'])\n fig.tight_layout()\n\n if experiment is None:\n return fig, features_df[features_df.columns[selected_features].values]\n\n else:\n experiment.log_figure(figure_name=\"Clustering\", figure=fig)\n return features_df[features_df.columns[selected_features].values]\n\n","repo_name":"florpi/GaHaCo","sub_path":"gahaco/features/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"74885922312","text":"# SimpleText class\n\nimport pygame\n# from pygame.locals import *\n\n\nclass SimpleText:\n \n def __init__(self, window, loc, value, text_color):\n pygame.font.init()\n self.window = window\n self.loc = loc\n # None indicates that we will use whatever the standard system font is\n self.font = pygame.font.SysFont(None, 30)\n self.text_color = text_color\n self.text = None # so that the call to setText below will force the creation of the text image\n self.set_value(value) # set the initial text for drawing\n\n def set_value(self, new_text):\n if self.text == new_text:\n return # nothing to change\n\n self.text = new_text # save the new text\n self.text_surface = self.font.render(self.text, True, self.text_color)\n\n def draw(self):\n self.window.blit(self.text_surface, self.loc)\n","repo_name":"armornik/game_oop","sub_path":"pygame/SimpleText/SimpleText.py","file_name":"SimpleText.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"40166015292","text":"import cadquery as cq\n\nfrom constants import M4, ROD, SCREW, WALL\n\nshoulder = (\n cq.Workplane()\n .rect(ROD.width + SCREW.head_diameter + 2 * WALL, ROD.width + 2 * WALL)\n .extrude(SCREW.length / 2)\n)\n\nshoulder = (\n shoulder.faces(\">Z\")\n .edges(\">X\")\n .workplane(centerOption=\"CenterOfBoundBox\")\n .move(-SCREW.head_diameter / 2)\n .hole(SCREW.grip_diameter)\n)\n\nshoulder = (\n shoulder.faces(\">Z\")\n .edges(\"Z\")\n .workplane(centerOption=\"CenterOfBoundBox\")\n .move(0, -ROD.width / 2 - WALL)\n .rect(ROD.width + ROD.clearance, ROD.width + ROD.clearance)\n .cutThruAll()\n)\n\nshoulder = (\n shoulder.faces(\">Z\")\n .workplane(centerOption=\"CenterOfBoundBox\")\n .hole(M4.thread_diameter, depth=2 * WALL)\n)\n\nshoulder = shoulder.faces(\">Z\").wires().toPending().extrude(4)\n\nshoulder = shoulder.edges(\"%line\").fillet(1)\n\n# cq.exporters.export(shoulder, \"shoulder.stl\")\n","repo_name":"AldenMB/Planimeter","sub_path":"shoulder.py","file_name":"shoulder.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"3274292926","text":"import logging\nimport sys\n\nFORMATTER = logging.Formatter(\n \"%(asctime)s — %(name)s — %(levelname)s — %(message)s\")\nLOG_FILE = \"app.log\"\n\n\ndef get_console_handler() -> logging.StreamHandler:\n\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(FORMATTER)\n return console_handler\n\n\ndef get_file_handler() -> logging.FileHandler:\n\n file_handler = logging.FileHandler(LOG_FILE, mode=\"a\")\n file_handler.setFormatter(FORMATTER)\n return file_handler\n\n\ndef get_logger(logger_name: str) -> logging.Logger:\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler())\n logger.propagate = False\n return logger\n","repo_name":"D0v1l3/KeepNotes","sub_path":"app/utilities/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12540716160","text":"op=int(input('Digite até qual valor fatorial deseja imprimir: \\n'))\r\n \r\nx=op\r\ny=op-1\r\n \r\nprint('\\n')\r\n \r\nfor i in range (op-1):\r\n \r\n x=y*x\r\n y=y-1\r\nif x == 0:\r\n x=1\r\nprint(x)","repo_name":"GeraldoLRJ/Exercicio_Python1","sub_path":"ativ6.5.py","file_name":"ativ6.5.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"70395301512","text":"from functools import partial\n\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport tqdm\n\ndef preprocess(dataset, data_directory, frequency):\n \n preprocessor_nuskin = {\n 'nuskin_tool_20' : partial(_preprocess_nuskin_tool, 20),\n 'nuskin_tool_30' : partial(_preprocess_nuskin_tool, 30),\n 'nuskin_tool_50' : partial(_preprocess_nuskin_tool, 50),\n 'nuskin_handover_rod' : partial(_preprocess_nuskin_handover, 'rod'),\n 'nuskin_handover_box' : partial(_preprocess_nuskin_handover, 'box'),\n 'nuskin_handover_plate' : partial(_preprocess_nuskin_handover, 'plate'),\n 'nuskin_food_apple' : partial(_preprocess_nuskin_food, 'apple'),\n 'nuskin_food_banana' : partial(_preprocess_nuskin_food, 'banana'),\n 'nuskin_food_empty' : partial(_preprocess_nuskin_food, 'empty'),\n 'nuskin_food_pepper' : partial(_preprocess_nuskin_food, 'pepper'),\n 'nuskin_food_tofu' : partial(_preprocess_nuskin_food, 'tofu'),\n 'nuskin_food_water' : partial(_preprocess_nuskin_food, 'water'),\n 'nuskin_food_watermelon' : partial(_preprocess_nuskin_food, 'watermelon')\n }\n \n preprocessor_biotac = {\n 'biotac_tool_20' : partial(_preprocess_biotac_tool, 20),\n 'biotac_tool_30' : partial(_preprocess_biotac_tool, 30),\n 'biotac_tool_50' : partial(_preprocess_biotac_tool, 50),\n 'biotac_handover_rod' : partial(_preprocess_biotac_handover, 'rod'),\n 'biotac_handover_box' : partial(_preprocess_biotac_handover, 'box'),\n 'biotac_handover_plate' : partial(_preprocess_biotac_handover, 'plate'),\n 'biotac_food_apple' : partial(_preprocess_biotac_food, 'apple'),\n 'biotac_food_banana' : partial(_preprocess_biotac_food, 'banana'),\n 'biotac_food_empty' : partial(_preprocess_biotac_food, 'empty'),\n 'biotac_food_pepper' : partial(_preprocess_biotac_food, 'pepper'),\n 'biotac_food_tofu' : partial(_preprocess_biotac_food, 'tofu'),\n 'biotac_food_water' : partial(_preprocess_biotac_food, 'water'),\n 'biotac_food_watermelon' : partial(_preprocess_biotac_food, 'watermelon')\n }\n\n if dataset in preprocessor_nuskin:\n signals, labels = preprocessor_nuskin[dataset](data_directory, frequency=frequency)\n \n elif dataset in preprocessor_biotac:\n signals, labels = preprocessor_biotac[dataset](data_directory, frequency=frequency)\n \n else: raise Exception('Dataset not found')\n \n np.savez(f'{dataset}_{frequency}hz.npz', signals=signals, labels=labels)\n print(f'Preprocessing for {dataset} for {frequency} Hz completed with signals.shape = {signals.shape} and labels.shape = {labels.shape}')\n\n\n# _ _ _ _ _____ \n# | \\ | | | | | | | __ \\ \n# | \\| | ___ _ _| |_ ___ _ _ ___| |__ | |__) | __ ___ _ __ _ __ ___ ___ ___ ___ ___ ___ _ __ \n# | . 
` |/ _ \\ | | | __/ _ \\| | | |/ __| '_ \\ | ___/ '__/ _ \\ '_ \\| '__/ _ \\ / __/ _ \\/ __/ __|/ _ \\| '__|\n# | |\\ | __/ |_| | || (_) | |_| | (__| | | | | | | | | __/ |_) | | | (_) | (_| __/\\__ \\__ \\ (_) | | \n# |_| \\_|\\___|\\__,_|\\__\\___/ \\__,_|\\___|_| |_| |_| |_| \\___| .__/|_| \\___/ \\___\\___||___/___/\\___/|_| \n# | | \n\n\ndef _preprocess_nuskin_tool(tool_length, data_directory, time_past=0.05, time_future=0.25, time_interval=0.005, frequency=4000):\n \n signals = None\n labels = None\n \n print(f'Preprocessing nuskin tool_{tool_length:02d} for {frequency} Hz ...')\n\n for trial in tqdm.tqdm(range(1, 11)):\n \n df_essentials = pd.read_csv(Path(data_directory)/f'nuskin/tool_1k/trial{trial}_{tool_length}_essentials.csv')\n df_raw = _read_nuskin_raw(Path(data_directory)/f'nuskin/tool_1k/trial{trial}_{tool_length}.tact')\n \n signals_temp = _bin_nuskin_signal(df_essentials.t.values, df_raw, time_past, time_future, time_interval, frequency)\n labels_temp = df_essentials.label_y.values\n \n signals = np.append(signals, signals_temp, axis=0) if signals is not None else signals_temp\n labels = np.append(labels, labels_temp, axis=0) if labels is not None else labels_temp\n \n return signals, labels\n\n\n# def _preprocess_nuskin_handover(item, data_directory, time_past=0.05, time_future=0.25, time_interval=0.005, frequency=4000):\n \n# signals = None\n# labels = None\n \n# df_essentials = pd.read_csv(Path(data_directory)/f'nuskin/handover/nt_essentials.csv')\n# df_essentials = df_essentials[df_essentials.obj == item]\n\n# print(f'Preprocessing nuskin handover {item} for {frequency} Hz ... ')\n\n# for _, row in tqdm.tqdm( df_essentials.iterrows(), total=df_essentials.shape[0] ):\n \n# df_raw = _read_nuskin_raw(Path(data_directory)/f'nuskin/handover/{row.fname}.tact')\n# tap_time = row.tapped_time\n \n# signals_temp = _bin_nuskin_signal(np.array([tap_time]), df_raw, time_past, time_future, time_interval, frequency)\n# labels_temp = row[['isPos', 'label_x_thumb', 'label_y_thumb', 'label_z_thumb', 'label_x_thumb_d', 'label_y_thumb_d', 'label_z_thumb_d', 'label_x_index', 'label_y_index', 'label_z_index', 'label_x_index_d', 'label_y_index_d', 'label_z_index_d']].values.astype('float')\n \n# signals = np.vstack((signals, signals_temp)) if signals is not None else signals_temp\n# labels = np.vstack((labels, labels_temp)) if labels is not None else labels_temp\n \n# return signals, labels\n\ndef _preprocess_nuskin_handover(item, data_directory, time_past=0.0, time_future=0.5, time_interval=0.005, frequency=4000):\n \n signals = None\n labels = None\n \n df_essentials = pd.read_csv(Path(data_directory)/f'nt_essentials.csv')\n df_essentials = df_essentials[df_essentials.obj == item]\n\n print(f'Preprocessing nuskin handover {item} for {frequency} Hz ... 
')\n\n for _, row in tqdm.tqdm( df_essentials.iterrows(), total=df_essentials.shape[0] ):\n \n df_raw = _read_nuskin_raw(Path(data_directory)/f'{row.fname}.tact')\n tap_time = row.tapped_time\n \n signals_temp = _bin_nuskin_signal(np.array([tap_time]), df_raw, time_past, time_future, time_interval, frequency)\n labels_temp = row[['isPos', 'label_x_thumb', 'label_y_thumb', 'label_z_thumb', 'label_x_index', 'label_y_index', 'label_z_index']].values.astype('float')\n \n signals = np.vstack((signals, signals_temp)) if signals is not None else signals_temp\n labels = np.vstack((labels, labels_temp)) if labels is not None else labels_temp\n \n return signals, labels\n\n\n# def _preprocess_nuskin_food(item, data_directory, time_past=0.0, time_future=6.0, time_interval=0.05, frequency=4000):\n \n# import glob\n \n# signals = None\n# labels = None\n\n# print(f'Preprocessing nuskin food {item} for {frequency} Hz ... ')\n \n# for filename in tqdm.tqdm( Path(Path(data_directory)/f'nuskin/food/food_poking_batch1/').glob(f'{item}_zero_*[0-9].tact') ):\n \n# df_raw = _read_nuskin_raw(filename)\n# start_time = df_raw['t'][0]# + 2\n# signal_temp = _bin_nuskin_signal(np.array([start_time]), df_raw, time_past, time_future, time_interval, frequency)\n# signals = np.vstack((signals, signal_temp)) if signals is not None else signal_temp\n \n# labels = np.ones(signals.shape[0])\n \n# return signals, labels\n\ndef _preprocess_nuskin_food(item, data_directory, time_past=0.0, time_future=6.0, time_interval=0.05, frequency=4000):\n \n import glob\n \n signals = None\n labels = None\n\n print(f'Preprocessing nuskin food {item} for {frequency} Hz ... ')\n\n df_food = pd.read_csv(Path(data_directory)/'nt_essentials.csv', header=None)\n df_food = df_food[df_food[2] == item]\n\n for k, filename in tqdm.tqdm( df_food[0].iteritems(), total=df_food.shape[0] ):\n \n df_raw = _read_nuskin_raw(Path('../..')/filename)\n start_time = df_raw['t'][0]# + 2\n signal_temp = _bin_nuskin_signal(np.array([start_time]), df_raw, time_past, time_future, time_interval, frequency)\n signals = np.vstack((signals, signal_temp)) if signals is not None else signal_temp\n \n labels = np.ones(signals.shape[0])\n \n return signals, labels\n\n\ndef _read_nuskin_raw(filepath):\n \n df = pd.read_csv(filepath,\n names=['isPos', 'taxel', 'removable', 't'],\n dtype={'isPos': int , 'taxel': int, 'removable': int, 't': float},\n sep=' ')\n \n df.drop(['removable'], axis=1, inplace=True)\n df.drop(df.tail(1).index, inplace=True)\n df.drop(df.head(1).index, inplace=True)\n \n return df.reset_index(drop=True)\n\n\ndef _bin_nuskin_signal(tap_times, df_raw, time_past, time_future, time_interval, frequency):\n\n n_bins = int((time_past + time_future) / time_interval) + 1\n signals = np.zeros([len(tap_times), 80, n_bins], dtype=int)\n\n summer = 0\n vals = np.array([])\n \n for i, tap_time in enumerate(tap_times):\n \n df_timespan = df_raw[(df_raw.t >= (tap_time - time_past)) & (df_raw.t < (tap_time + time_future))]\n df_timespan = df_timespan.reset_index(drop=True)\n \n last_spiked = np.zeros(80)\n indices = []\n \n for j, sample in df_timespan.iterrows():\n \n if (sample.t - last_spiked[int(sample.taxel) - 1]) >= 1 / frequency:\n \n indices.append(j)\n last_spiked[int(sample.taxel) - 1] = sample.t\n \n df_timespan = df_timespan.iloc[indices]\n df_timespan = df_timespan.reset_index(drop=True)\n \n df_positive = df_timespan[df_timespan.isPos == 1]\n df_negative = df_timespan[df_timespan.isPos == 0]\n\n t = tap_time - time_past\n k = 0\n\n while t < (tap_time + 
time_future):\n \n positive_taxels = df_positive[((df_positive.t >= t) & (df_positive.t < t + time_interval))].taxel\n if len(positive_taxels):\n for taxel in positive_taxels:\n signals[i, taxel - 1, k] += 1\n \n negative_taxels = df_negative[((df_negative.t >= t) & (df_negative.t < t + time_interval))].taxel\n if len(negative_taxels):\n for taxel in negative_taxels:\n signals[i, taxel - 1, k] -= 1\n \n t += time_interval\n k += 1\n \n return signals\n\n\n# ____ _ _ _____ \n# | _ \\(_) | | | __ \\ \n# | |_) |_ ___ | |_ __ _ ___ | |__) | __ ___ _ __ _ __ ___ ___ ___ ___ ___ ___ _ __ \n# | _ <| |/ _ \\| __/ _` |/ __| | ___/ '__/ _ \\ '_ \\| '__/ _ \\ / __/ _ \\/ __/ __|/ _ \\| '__|\n# | |_) | | (_) | || (_| | (__ | | | | | __/ |_) | | | (_) | (_| __/\\__ \\__ \\ (_) | | \n# |____/|_|\\___/ \\__\\__,_|\\___| |_| |_| \\___| .__/|_| \\___/ \\___\\___||___/___/\\___/|_| \n# | | \n\n\ndef _preprocess_biotac_tool(tool_length, data_directory, samples_past=100, samples_future=500, frequency=2200):\n \n signals = None\n labels = None\n \n print(f'Preprocessing biotac_tool_{tool_length} for {frequency} Hz ... ')\n\n for trial in tqdm.tqdm( range(1, 21) ):\n \n df_essentials = pd.read_csv(Path(data_directory)/f'biotac/tool_1k/trial{trial}_{tool_length}_essentials.csv')\n df_raw = pd.read_csv(Path(data_directory)/f'biotac/tool_1k/trial{trial}_{tool_length}.csv')\n \n signals_trial = _crop_biotac_signal(df_essentials.orignal_index.values, df_raw, samples_past, samples_future)\n labels_trial = df_essentials.label_y.values\n \n signals = np.append(signals, signals_trial, axis=0) if signals is not None else signals_trial\n labels = np.append(labels, labels_trial, axis=0) if labels is not None else labels_trial\n \n return _downsample_biotac_signal(signals, frequency), labels\n\n\ndef _preprocess_biotac_handover(item, data_directory, samples_past=100, samples_future=500, frequency=2200):\n \n signals = None\n labels = None\n \n df_essentials = pd.read_csv(Path(data_directory)/f'biotac/handover/bt_essentials.csv')\n df_essentials = df_essentials[df_essentials.obj == item]\n \n print(f'Preprocessing biotac handover {item} for {frequency} Hz ... 
')\n \n for _, row in tqdm.tqdm( df_essentials.iterrows(), total=df_essentials.shape[0] ) :\n \n df_raw = pd.read_csv(Path(data_directory)/f'biotac/handover/{row.fname}.csv')\n tap_index = np.abs(df_raw.t - row.tapped_time).argmin()\n \n signals_temp = _crop_biotac_signal(np.array([tap_index]), df_raw, samples_past, samples_future)\n labels_temp = row[['isPos', 'label_x_thumb', 'label_y_thumb', 'label_z_thumb', 'label_x_thumb_d', 'label_y_thumb_d', 'label_z_thumb_d', 'label_x_index', 'label_y_index', 'label_z_index', 'label_x_index_d', 'label_y_index_d', 'label_z_index_d']].values.astype('float')\n \n signals = np.vstack((signals, signals_temp)) if signals is not None else signals_temp\n labels = np.vstack((labels, labels_temp)) if labels is not None else labels_temp\n \n return _downsample_biotac_signal(signals, frequency), labels\n\n\ndef _preprocess_biotac_food(item, data_directory, samples_past=600, samples_future=600, frequency=2200):\n \n import glob\n \n signals = None\n labels = None\n \n for filename in tqdm.tqdm( Path( Path(data_directory)/f'biotac/food/').glob(f'{item}_zero_*.csv') ):\n \n df_raw = pd.read_csv(filename)\n signal_temp = df_raw.pac.values\n trigger_index = np.argmax(np.abs(signal_temp))\n signal_temp = signal_temp[trigger_index-samples_past:trigger_index+samples_future]\n\n signals = np.vstack((signals, signal_temp)) if signals is not None else signal_temp\n \n labels = np.ones(signals.shape[0])\n \n return _downsample_biotac_signal(signals, frequency), labels\n\n\ndef _crop_biotac_signal(tap_indices, df_raw, samples_past=100, samples_future=500):\n \n signals = np.zeros((len(tap_indices), samples_past + samples_future))\n \n for i, tap_index in enumerate(tap_indices):\n signals[i] = (df_raw.iloc[tap_index-samples_past:tap_index+samples_future].pac.values)\n \n return signals\n\n\ndef _downsample_biotac_signal(signals, frequency=2200):\n \n if frequency == 2200: return signals\n \n samples_to_keep = int(frequency / 2200 * signals.shape[1]) + 1\n indices = np.round(np.linspace(0, signals.shape[1] - 1, samples_to_keep)).astype(int)\n \n return signals[:, indices]\n\n\n# _____ _ _____ _____ _ __ \n# / ____| | |_ _| |_ _| | | / _| \n# | | | | | | | | _ __ | |_ ___ _ __| |_ __ _ ___ ___ \n# | | | | | | | | | '_ \\| __/ _ \\ '__| _/ _` |/ __/ _ \\\n# | |____| |____ _| |_ _| |_| | | | || __/ | | || (_| | (_| __/\n# \\_____|______|_____| |_____|_| |_|\\__\\___|_| |_| \\__,_|\\___\\___|\n# \n\n\nif __name__ == '__main__':\n\n import sys\n\n if len(sys.argv) == 4:\n \n preprocess(sys.argv[1], sys.argv[2], int(sys.argv[3]))\n","repo_name":"clear-nus/ext-sense","sub_path":"data/preprocessed/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":15837,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"}
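From the __main__ block above, the script expects a dataset key, a data directory, and a frequency on the command line; an equivalent call from Python (the path is a placeholder):

# same as: python preprocess.py nuskin_tool_20 /path/to/data 4000
preprocess('nuskin_tool_20', '/path/to/data', 4000)
# writes nuskin_tool_20_4000hz.npz containing the 'signals' and 'labels' arrays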
+{"seq_id":"32648990602","text":"#!/usr/bin/python3\n\"\"\"function that search and update file based on a specific string\"\"\"\n\n\ndef append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n \"\"\"\n inserts a line of text to a file, after each line containing\n a specific string\n\n Arguments:\n filename - name of text file\n search_string - string to search for\n new_string - string to be replaced with\n \"\"\"\n\n string = \"\"\n with open(filename) as f:\n for lines in f:\n string += lines\n if search_string in lines:\n string += new_string\n with open(filename, \"w\", encoding=\"utf-8\") as text:\n text.write(string)\n","repo_name":"TessyJames28/alx-higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"34983875634","text":"#!/usr/bin/env python3\n\"\"\" Task 1 \"\"\"\n\nimport csv\nimport math\nfrom typing import List, Tuple\n\n\ndef index_range(page: int, page_size: int) -> Tuple[int]:\n \"\"\" Index_range\n Arguments:\n ---------\n `page`: current page number\n `page_size`: items number in every page\n Return:\n -------\n list for those particular pagination parameters\n \"\"\"\n start: int\n end: int\n\n if page == 1:\n start = 0\n else:\n start = (page - 1) * page_size\n\n end = page * page_size\n\n return (start, end)\n\n\nclass Server:\n \"\"\"Server class to paginate a database of popular baby names.\n \"\"\"\n DATA_FILE = \"Popular_Baby_Names.csv\"\n\n def __init__(self):\n self.__dataset = None\n\n def dataset(self) -> List[List]:\n \"\"\"Cached dataset\n \"\"\"\n if self.__dataset is None:\n with open(self.DATA_FILE) as f:\n reader = csv.reader(f)\n dataset = [row for row in reader]\n self.__dataset = dataset[1:]\n\n return self.__dataset\n\n def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n \"\"\" paginate the dataset correctly and\n return the appropriate page of the dataset\n \"\"\"\n assert(type(page) == int)\n assert(type(page_size) == int)\n assert(page > 0)\n assert(page_size > 0)\n dataset = self.dataset()\n start, end = index_range(page, page_size)\n return dataset[start: end]\n","repo_name":"EGabriel-bot/holbertonschool-web_back_end","sub_path":"0x04-pagination/1-simple_pagination.py","file_name":"1-simple_pagination.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"15906199268","text":"\"\"\"adding columnqs\n\nRevision ID: c7d9cb770a1c\nRevises: \nCreate Date: 2022-04-29 02:06:47.069409\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c7d9cb770a1c'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('employee', schema=None) as batch_op:\n batch_op.drop_column('assigned_id')\n\n with op.batch_alter_table('flex_status', schema=None) as batch_op:\n batch_op.add_column(sa.Column('issue', sa.String(length=100), nullable=True))\n batch_op.alter_column('enterTime',\n existing_type=sa.DATETIME(),\n nullable=True)\n batch_op.alter_column('exitTime',\n existing_type=sa.DATETIME(),\n nullable=True)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('flex_status', schema=None) as batch_op:\n batch_op.alter_column('exitTime',\n existing_type=sa.DATETIME(),\n nullable=False)\n batch_op.alter_column('enterTime',\n existing_type=sa.DATETIME(),\n nullable=False)\n batch_op.drop_column('issue')\n\n with op.batch_alter_table('employee', schema=None) as batch_op:\n batch_op.add_column(sa.Column('assigned_id', sa.VARCHAR(length=10), nullable=True))\n\n # ### end Alembic commands ###\n","repo_name":"parasat00/SoftwareProject","sub_path":"migrations/versions/c7d9cb770a1c_adding_columnqs.py","file_name":"c7d9cb770a1c_adding_columnqs.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"75009421832","text":"from utils.measure import checker\n\nclass Solution:\n\n def minEatingSpeed(self, piles: list[int], H: int) -> int:\n l, r = 1, max(piles)\n k = 0\n\n while l <= r:\n m = (l + r) // 2\n\n totalTime = 0\n for p in piles:\n totalTime += ((p - 1) // m) + 1\n if totalTime <= H:\n k = m\n r = m - 1\n else:\n l = m + 1\n return k\n\n\ndef main():\n with checker(Solution().minEatingSpeed, repeat=1_000_000) as c:\n c.check_2([3,6,7,11], 8, 4)\n c.check_2([30,11,23,4,20], 5, 30)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"ashbyp/pscratch","sub_path":"algorithms/l/mineatingspeed.py","file_name":"mineatingspeed.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"14157828484","text":"from flask import Flask, request, render_template, make_response\r\nimport random\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef addizioni():\r\n n1 = random.randint(0, 10)\r\n n2 = random.randint(0, 10)\r\n if request.method == 'POST':\r\n if request.form['input'] == request.cookies.get('risultato'):\r\n return render_template(\"giusto.html\", risultato=request.form['input'])\r\n else:\r\n return render_template(\"sbagliato.html\", risultato_sbagliato=request.form['input'], risultato_corretto=request.cookies.get('risultato'))\r\n else:\r\n resp = make_response(render_template(\"index.html\", numero1=str(n1), numero2=str(n2)))\r\n resp.set_cookie('risultato', str(n1+n2))\r\n return resp\r\n","repo_name":"Bess6598/Flask-example_INFOUMA_UNIPI","sub_path":"addizioni.py","file_name":"addizioni.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"74195854792","text":"from rest_framework import serializers\n\nfrom .models import Quote\n\n\nclass QuoteSerializer(serializers.ModelSerializer):\n\n def to_representation(self, instance):\n representation = super(QuoteSerializer, self).to_representation(instance)\n domain_name = 'http://127.0.0.1:8010'\n full_path = domain_name + instance.image.url\n representation['image'] = full_path\n return representation\n\n class Meta:\n fields = '__all__'\n model = Quote\n","repo_name":"zaleksandrne/bquotes","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"24279555947","text":"\n__copyright__ = \"Copyright 2016, http://radical.rutgers.edu\"\n__license__ = \"MIT\"\n\n\nimport os\n\nimport radical.utils as ru\n\nfrom .base import LaunchMethod\n\n\n# ------------------------------------------------------------------------------\n#\nclass SSH(LaunchMethod):\n\n # --------------------------------------------------------------------------\n #\n def __init__(self, name, lm_cfg, rm_info, log, prof):\n\n LaunchMethod.__init__(self, name, lm_cfg, rm_info, log, prof)\n\n\n # --------------------------------------------------------------------------\n #\n def _init_from_scratch(self, env, env_sh):\n\n command = ru.which('ssh')\n\n if not command:\n raise RuntimeError(\"ssh not found!\")\n\n # Some MPI environments (e.g. SGE) put a link to rsh as \"ssh\" into\n # the path. We try to detect that and then use different arguments.\n is_rsh = False\n if os.path.islink(command):\n\n command = os.path.realpath(command)\n\n if os.path.basename(command) == 'rsh':\n self._log.info('Detected that \"ssh\" is a link to \"rsh\".')\n is_rsh = True\n\n if not is_rsh:\n command += ' -o StrictHostKeyChecking=no -o ControlMaster=auto'\n\n lm_info = {'env' : env,\n 'env_sh' : env_sh,\n 'command': command}\n\n return lm_info\n\n\n # --------------------------------------------------------------------------\n #\n def _init_from_info(self, lm_info):\n\n self._env = lm_info['env']\n self._env_sh = lm_info['env_sh']\n self._command = lm_info['command']\n\n assert self._command\n\n\n # --------------------------------------------------------------------------\n #\n def finalize(self):\n\n pass\n\n\n # --------------------------------------------------------------------------\n #\n def can_launch(self, task):\n\n # ensure single rank\n if len(task['slots']['ranks']) > 1:\n return False, 'more than one rank'\n\n # ensure non-MPI\n if task['description']['use_mpi']:\n return False, 'cannot launch MPI tasks'\n\n if not task['description']['executable']:\n return False, 'no executable'\n\n return True, ''\n\n\n # --------------------------------------------------------------------------\n #\n def get_launcher_env(self):\n\n return ['. $RP_PILOT_SANDBOX/%s' % self._env_sh]\n\n\n # --------------------------------------------------------------------------\n #\n def get_launch_cmds(self, task, exec_path):\n\n slots = task['slots']\n\n if len(slots['ranks']) != 1:\n raise RuntimeError('ssh cannot run multi-rank tasks')\n\n host = slots['ranks'][0]['node_name']\n cmd = '%s %s %s' % (self._command, host, exec_path)\n return cmd.rstrip()\n\n\n # --------------------------------------------------------------------------\n #\n def get_rank_cmd(self):\n\n return 'export RP_RANK=0\\n'\n\n\n # --------------------------------------------------------------------------\n #\n def get_exec(self, task):\n\n td = task['description']\n task_exec = td['executable']\n task_args = td.get('arguments')\n task_argstr = self._create_arg_string(task_args)\n command = '%s %s' % (task_exec, task_argstr)\n\n return command.rstrip()\n\n\n# ------------------------------------------------------------------------------\n\n","repo_name":"radical-cybertools/radical.pilot","sub_path":"src/radical/pilot/agent/launch_method/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"}
+{"seq_id":"31777277274","text":"from .msh import latticeresult_to_msh\nfrom .md import latticesystem_to_md\n\ndef outputs_from_json(sysdct: dict):\n outputs = {}\n for casedct in sysdct['cases']:\n name = casedct['name']\n outputs[name] = []\n if 'outputs' in casedct:\n outputs[name] = outputs[name] + casedct['outputs']\n return outputs\n","repo_name":"Xero64/pyvlm","sub_path":"pyvlm/outputs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
+{"seq_id":"8037930541","text":"import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pickle\nfrom torch import optim\nfrom torch.utils.data import DataLoader\n\nfrom qm9_dataset import qm9_dataset\n\ndef main():\n train_dataset = qm9_dataset(directory = \"QM9_smiles/\", split = 'train', test_percent = 10, on_the_fly = True, max_num_atoms = 9)\n test_dataset = qm9_dataset(directory = \"QM9_smiles/\", split = 'test', test_percent = 10, on_the_fly = True, max_num_atoms = 9)\n \n train_dataloader = DataLoader(train_dataset, 20, shuffle = True)\n test_dataloader = DataLoader(test_dataset, 20, shuffle = False)\n\n for batch_idx, data in enumerate(train_dataloader):\n print(data['num_atoms'])\n print(data['adj'])\n print(data['laplacian'])\n print(data['node_feat'])\n print(data['edge_feat'])\n print(data['alpha'])\n break\n\n for batch_idx, data in enumerate(test_dataloader):\n print(data['num_atoms'].size())\n print(data['adj'].size())\n print(data['laplacian'].size())\n print(data['node_feat'].size())\n print(data['edge_feat'].size())\n print(data['alpha'].size())\n break\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HyTruongSon/MGVAE","sub_path":"supervised_learning_molecules/check_qm9.py","file_name":"check_qm9.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"}
+{"seq_id":"25770308216","text":"import sys\n# Pandas (Data analysis and manipulation) [pip3 install pandas]\nimport pandas as pd\n# Matplotlib (Visualization) [pip3 install matplotlib]\nimport matplotlib.pyplot as plt\n\ndef main():\n # Input Data:\n # PY: Python_OPC_UA_Data_Evaluation_Sync, Python_OPC_UA_Data_Evaluation_ASync\n # C#: CSharp_OPC_UA_Data_Evaluation_Sync\n FILE_NAME = 'CSharp_OPC_UA_Data_Evaluation_Sync'\n\n # Read Data from the File\n ua_client_data = pd.read_csv(f'Data//{FILE_NAME}.txt')\n print('[INFO] The data is successfully read from the file.')\n\n # Assign data to variables\n # Time [ms]\n time = ua_client_data[ua_client_data.columns[0]]\n print(f'[INFO] Number of input data: {len(time)}')\n\n # Create figure\n fig, ax = plt.subplots()\n\n fig.suptitle(f'File name: {FILE_NAME}.txt', fontsize = 20)\n\n for i, col in enumerate(ua_client_data.columns):\n if i > 0:\n # Raw Data\n plt.plot(time, ua_client_data[col], label=f'OPCUA Variable {i}')\n\n # Axis Parameters:\n # Label\n ax.set_xlabel(r't (ms)')\n ax.set_ylabel(f'Sine (t)')\n # Other dependencies\n ax.grid(linewidth = 0.75, linestyle = '--')\n ax.legend(fontsize=10.0)\n\n print('[INFO] Display the result.')\n # Display the result\n plt.show()\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"rparak/OPCUA_Simple","sub_path":"Python_Client/opcua_evaluation.py","file_name":"opcua_evaluation.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"}
+{"seq_id":"15702628146","text":"\n# coding: utf-8\n\n# ##Age and Gender Classification Using Convolutional Neural Networks - Demo\n# \n# This code is released with the paper:\n# \n# Gil Levi and Tal Hassner, \"Age and Gender Classification Using Convolutional Neural Networks,\" IEEE Workshop on Analysis and Modeling of Faces and Gestures (AMFG), at the IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), Boston, June 2015\n# \n# If you find the code useful, please add suitable reference to the paper in your work.\n\n# ## Loading the mean image\n\n# In[1]:\n\nimport caffe\n\n\n# In[2]:\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n#caffe_root = './caffe/' \n#import sys\n#sys.path.insert(0, caffe_root + 'python')\nimport caffe\n\nplt.rcParams['figure.figsize'] = (10, 10)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n\n# In[3]:\n\nmean_filename='./gender_age_detection/mean.binaryproto'\nproto_data = open(mean_filename, \"rb\").read()\na = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)\nmean = caffe.io.blobproto_to_array(a)[0]\n\n\n# ## Loading the age network\n\n# In[4]:\n\n# age_net_pretrained='./age_net.caffemodel'\n# age_net_model_file='./deploy_age.prototxt'\n# age_net = caffe.Classifier(age_net_model_file, age_net_pretrained,\n# mean=mean,\n# channel_swap=(2,1,0),\n# raw_scale=255,\n# image_dims=(256, 256))\n\n\n# ## Loading the gender network\n\n# In[4]:\n\ngender_net_pretrained='./gender_age_detection/gender_net.caffemodel'\ngender_net_model_file='./gender_age_detection/deploy_gender.prototxt'\ngender_net = caffe.Classifier(gender_net_model_file, gender_net_pretrained,\n mean=mean,\n channel_swap=(2,1,0),\n raw_scale=255,\n image_dims=(256, 256))\n\n\n# ## Labels\n\n# In[5]:\n\nage_list=['(0, 2)','(4, 6)','(8, 12)','(15, 20)','(25, 32)','(38, 43)','(48, 53)','(60, 100)']\ngender_list=['Male','Female']\n\n\n# ## Reading and plotting the input image\n\n# In[7]:\n\n#example_image = './images/images-10000000_1463349307034642_1694941330775474176_n.mp4/Img149.jpg'\n#example_image = './images/2017 natok/lux-natok/Shesher Golpo _ Full Drama _ Lux Chirochena Shourobher Golpo-DerWLNZC_IU.mp4/Img2.jpg'\n#input_image = caffe.io.load_image(example_image)\n#_ = plt.imshow(input_image)\n#prediction = gender_net.predict([input_image]) \n\n#print('predicted gender:', gender_list[prediction[0].argmax()])\n\n\n# In[3]:\n\n#prediction = age_net.predict([input_image]) \n\n#print('predicted age:', age_list[prediction[0].argmax()])\n\n\n# ## Gender prediction\n\n# In[7]:\n\n#prediction = gender_net.predict([input_image]) \n\n#print('predicted gender:', gender_list[prediction[0].argmax()])\n\n\n# In[12]:\n\n# Haarcascade models\n\n#eye_cascade = cv2.CascadeClassifier('/home/tonmoy/anaconda3/share/OpenCV/haarcascades/haarcascade_eye.xml')\n\n#img = cv2.imread('test.jpg')\n#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n\n# In[6]:\n\n#function to write in csv\nimport csv\ndef write_list_in_file(final, name):\n with open(name, \"w\", newline=\"\",encoding=\"utf8\") as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerows(final)\n\n\n# In[7]:\n\n\nimport os\ndef get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\n\n# In[8]:\n\n#Function to read csv files\nfrom csv import reader\n# Load a CSV file\\n\",\ndef load_csv(filename):\n dataset = list()\n with open(filename, 'r') as file:\n csv_reader = reader(file)\n for row in csv_reader:\n if not 
row:\n continue\n dataset.append(row)\n return dataset\n\n\n# In[ ]:\n\nimport pylab\nimport imageio\nimport os\n\nimg_path = './images/talkshow/tritiyomatra/new' #path to images\ncsv_path = './csvs/final_csvs/tritiyomatra/new'\ndir_list = get_immediate_subdirectories(img_path)\n#print(dir_list)\n\nfor directory in dir_list:\n \n print(directory)\n path = csv_path+'/result1_'+directory+'.csv'\n if(os.path.isfile(path)):\n continue\n else:\n myTable = load_csv(csv_path+'/result_'+directory+'.csv')\n print(len(myTable))\n myTable[0].append(\"gender\")\n for i in range(1, len(myTable)):\n try: \n input_image = caffe.io.load_image(img_path+'/'+directory+'/Img'+str(i)+'.jpg')\n #predict gender\n #_ = plt.imshow(input_image)\n prediction = gender_net.predict([input_image]) \n #print(img)\n #print('predicted gender:', gender_list[prediction[0].argmax()])\n if(i%100==0):\n print(i);\n #prediction = gender_net.predict([input_image]) \n myTable[i].append(str(gender_list[prediction[0].argmax()]))\n\n #i = i+1\n except:\n #i = i+1\n continue;\n #Write the results in csv \n write_list_in_file(myTable,csv_path+'/result1_'+directory+'.csv')\n\n\n\n\n","repo_name":"tonmoycsedu/tv.emotion.mining","sub_path":"gender_detection.py","file_name":"gender_detection.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20436253428","text":"import numpy as np\nimport os\nfrom configparser import ConfigParser\nfrom generator import AugmentedImageSequence\nfrom models.keras import ModelFactory\nfrom sklearn.metrics import roc_auc_score\nfrom utility import get_sample_counts\n\n\ndef main():\n # parser config\n config_file = \"./config.ini\"\n cp = ConfigParser()\n cp.read(config_file)\n\n # default config\n output_dir = cp[\"DEFAULT\"].get(\"output_dir\")\n base_model_name = cp[\"DEFAULT\"].get(\"base_model_name\")\n class_names = cp[\"DEFAULT\"].get(\"class_names\").split(\",\")\n image_source_dir = cp[\"DEFAULT\"].get(\"image_source_dir\")\n\n # train config\n image_dimension = cp[\"TRAIN\"].getint(\"image_dimension\")\n\n # test config\n batch_size = cp[\"TEST\"].getint(\"batch_size\")\n test_steps = cp[\"TEST\"].get(\"test_steps\")\n use_best_weights = cp[\"TEST\"].getboolean(\"use_best_weights\")\n\n # parse weights file path\n output_weights_name = cp[\"TRAIN\"].get(\"output_weights_name\")\n weights_path = os.path.join(output_dir, output_weights_name)\n best_weights_path = os.path.join(output_dir, f\"best_{output_weights_name}\")\n\n # get test sample count\n test_counts, _ = get_sample_counts(output_dir, \"test\", class_names)\n\n # compute steps\n if test_steps == \"auto\":\n test_steps = int(test_counts / batch_size)\n else:\n try:\n test_steps = int(test_steps)\n except ValueError:\n raise ValueError(f\"\"\"\n test_steps: {test_steps} is invalid,\n please use 'auto' or integer.\n \"\"\")\n print(f\"** test_steps: {test_steps} **\")\n\n print(\"** load model **\")\n if use_best_weights:\n print(\"** use best weights **\")\n model_weights_path = best_weights_path\n else:\n print(\"** use last weights **\")\n model_weights_path = weights_path\n model_factory = ModelFactory()\n model = model_factory.get_model(\n class_names,\n model_name=base_model_name,\n use_base_weights=False,\n weights_path=model_weights_path)\n\n print(\"** load test generator **\")\n test_sequence = AugmentedImageSequence(\n dataset_csv_file=os.path.join(output_dir, \"dev.csv\"),\n class_names=class_names,\n source_image_dir=image_source_dir,\n batch_size=batch_size,\n target_size=(image_dimension, image_dimension),\n augmenter=None,\n steps=test_steps,\n shuffle_on_epoch_end=False,\n )\n\n print(\"** make prediction **\")\n y_hat = model.predict_generator(test_sequence, verbose=1)\n y = test_sequence.get_y_true()\n\n test_log_path = os.path.join(output_dir, \"test.log\")\n print(f\"** write log to {test_log_path} **\")\n aurocs = []\n with open(test_log_path, \"w\") as f:\n for i in range(len(class_names)):\n try:\n score = roc_auc_score(y[:, i], y_hat[:, i])\n aurocs.append(score)\n except ValueError:\n score = 0\n f.write(f\"{class_names[i]}: {score}\\n\")\n mean_auroc = np.mean(aurocs)\n f.write(\"-------------------------\\n\")\n f.write(f\"mean auroc: {mean_auroc}\\n\")\n print(f\"mean auroc: {mean_auroc}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"brucechou1983/CheXNet-Keras","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":290,"dataset":"github-code","pt":"28"}
+{"seq_id":"42869145272","text":"import numpy as np\nimport time\nimport functools\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport os\nfrom sklearn.neighbors import NearestNeighbors\nfrom lib.utils.meshrenderer import meshrenderer\nimport open3d as o3d\nfrom lib.config import cfg\nfrom lib.utils.pysixd import transform, misc\n\n# Constants\n\nN = 3000 # number of random points in the dataset\ndim = 3 # number of dimensions of the points\nverbose = False\n# max_mean_dist_factor = 2.0\nangle_change_limit = 20 * np.pi / 180. # = 20 deg #0.5236=30 deg\n\ndef lazy_property(function):\n attribute = '_cache_' + function.__name__\n\n @property\n @functools.wraps(function)\n def decorator(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, function(self))\n return getattr(self, attribute)\n\n return decorator\n\ndef best_fit_transform(A, B, depth_only=False, no_depth=False):\n '''\n Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions\n Input:\n A: Nxm numpy array of corresponding points\n B: Nxm numpy array of corresponding points\n Returns:\n T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B\n R: mxm rotation matrix\n t: mx1 translation vector\n '''\n\n assert A.shape == B.shape\n\n # get number of dimensions\n m = A.shape[1]\n\n # translate points to their centroids\n centroid_A = np.mean(A, axis=0)\n centroid_B = np.mean(B, axis=0)\n AA = A - centroid_A\n BB = B - centroid_B\n\n if depth_only == True and no_depth == False:\n R = np.eye(3)\n t = centroid_B.T - centroid_A.T\n #t = np.array([0, 0, t[2]])\n else:\n # rotation matrix\n H = np.dot(AA.T, BB)\n U, S, Vt = np.linalg.svd(H)\n R = np.dot(Vt.T, U.T)\n # special reflection case\n if np.linalg.det(R) < 0:\n Vt[m - 1, :] *= -1\n R = np.dot(Vt.T, U.T)\n\n t = centroid_B.T - np.dot(R, centroid_A.T)\n if no_depth == True and depth_only == False:\n t = np.array([t[0], t[1], 0])\n\n T = np.identity(m + 1)\n T[:m, :m] = R\n T[:m, m] = t\n\n return T, R, t\n\n\ndef nearest_neighbor(src, dst):\n '''\n Find the nearest (Euclidean) neighbor in dst for each point in src\n Input:\n src: Nxm array of points\n dst: Nxm array of points\n Output:\n distances: Euclidean distances of the nearest neighbor\n indices: dst indices of the nearest neighbor\n '''\n\n assert src.shape == dst.shape\n\n neigh = NearestNeighbors(n_neighbors=1)\n neigh.fit(dst)\n distances, indices = neigh.kneighbors(src, return_distance=True)\n return distances.ravel(), indices.ravel()\n\n\ndef icp(A, B, init_pose=None, max_iterations=200, tolerance=0.001, verbose=False, depth_only=False, no_depth=False):\n '''\n The Iterative Closest Point method: finds best-fit transform that maps points A on to points B\n Input:\n A: Nxm numpy array of source mD points\n B: Nxm numpy array of destination mD point\n init_pose: (m+1)x(m+1) homogeneous transformation\n max_iterations: exit algorithm after max_iterations\n tolerance: convergence criteria\n Output:\n T: final homogeneous transformation that maps A on to B\n distances: Euclidean distances (errors) of the nearest neighbor\n i: number of iterations to converge\n '''\n\n assert A.shape == B.shape\n\n\n\n # get number of dimensions\n m = A.shape[1]\n\n # make points homogeneous, copy them to maintain the originals\n src = np.ones((m + 1, A.shape[0]))\n dst = np.ones((m + 1, B.shape[0]))\n src[:m, :] = np.copy(A.T)\n dst[:m, :] = np.copy(B.T)\n\n # apply the initial pose estimation\n if init_pose is not None:\n src = 
np.dot(init_pose, src)\n\n prev_error = 0\n if verbose:\n plt.clf()\n fig = plt.figure(1)\n ax = Axes3D(fig)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)\n ax.scatter(A[:, 0], A[:, 1], A[:, 2], label='initial', marker='.', c='green')\n ax.scatter(B[:, 0], B[:, 1], B[:, 2], label='target', marker='.', c='blue')\n\n for i in range(max_iterations):\n # find the nearest neighbors between the current source and destination points\n distances, indices = nearest_neighbor(src[:m, :].T, dst[:m, :].T)\n\n # compute the transformation between the current source and nearest destination points\n T, _, _ = best_fit_transform(src[:m, :].T, dst[:m, indices].T, depth_only=depth_only, no_depth=no_depth)\n\n # update the current source\n src = np.dot(T, src)\n\n mean_error = np.mean(distances)\n # print mean_error\n # check error\n if np.abs(prev_error - mean_error) < tolerance:\n break\n prev_error = mean_error\n\n # calculate final transformation\n T, _, _ = best_fit_transform(A, src[:m, :].T, depth_only=depth_only, no_depth=no_depth)\n\n if verbose:\n anim = ax.scatter(src[0, :], src[1, :], src[2, :], label='estimated', marker='.', c='red')\n plt.legend()\n plt.show()\n\n return T, distances, i\n\n\nclass SynRenderer(object):\n def __init__(self, class_type):\n MODEL_PATH = os.path.join('data/linemod', 'cad/cad.ply')\n self.model_path = MODEL_PATH.replace('cad', class_type)\n self.renderer\n\n @lazy_property\n def renderer(self):\n #import ipdb; ipdb.set_trace()\n # if self.model == 'cad':\n # if self.model == 'reconst':\n return meshrenderer.Renderer([self.model_path],1,'.',1000)\n\n def generate_synthetic_depth(self, K_test, R_est, t_est, test_shape, display=False):\n # renderer = meshrenderer.Renderer(['/data/SLC_precise_blue.ply'],1,'.',1)\n # R = transform.random_rotation_matrix()[:3,:3]\n H_test, W_test = test_shape[:2]\n grey, depth_x = self.renderer.render(\n obj_id=0,\n W=W_test,\n H=H_test,\n K=K_test,\n R=R_est,\n t=t_est,\n near=10,\n far=10000,\n random_light=False\n )\n if display:\n import matplotlib.pyplot as plt\n plt.subplot(121)\n plt.imshow(depth_x)\n plt.subplot(122)\n plt.imshow(grey)\n plt.show()\n pts = misc.rgbd_to_point_cloud(K_test, depth_x)[0]\n\n return pts\n\n def render_trafo(self, K_test, R_est, t_est, test_shape, downSample=1):\n W_test, H_test = test_shape[:2]\n\n bgr, depth_x = self.renderer.render(\n obj_id=0,\n W=W_test, # /downSample,\n H=H_test, # /downSample,\n K=K_test,\n R=R_est,\n t=np.array(t_est),\n near=10,\n far=10000,\n random_light=False\n )\n return bgr\n\ndef icp_refinement(depth_crop, icp_renderer, R_est, t_est, K_test, test_render_dims, depth_only=False, no_depth=False,\n max_mean_dist_factor=2.0):\n synthetic_pts = icp_renderer.generate_synthetic_depth(K_test, R_est, t_est, depth_crop.shape)\n centroid_synthetic_pts = np.mean(synthetic_pts, axis=0)\n try:\n max_mean_dist = np.max(np.linalg.norm(synthetic_pts - centroid_synthetic_pts, axis=1))\n except:\n return (R_est, t_est)\n\n real_depth_pts = misc.rgbd_to_point_cloud(K_test, depth_crop)[0]\n real_synmean_dist = np.linalg.norm(real_depth_pts - centroid_synthetic_pts, axis=1)\n real_depth_pts = real_depth_pts[real_synmean_dist < max_mean_dist_factor * max_mean_dist]\n if len(real_depth_pts) < len(synthetic_pts) / 20.:\n print('not enough visible points')\n R_refined = R_est\n t_refined = t_est\n else:\n sub_idcs_real = 
np.random.choice(len(real_depth_pts), np.min([len(real_depth_pts), len(synthetic_pts), N]))\n sub_idcs_syn = np.random.choice(len(synthetic_pts), np.min([len(real_depth_pts), len(synthetic_pts), N]))\n #T = icp_wrap(synthetic_pts, real_depth_pts, threshold=0.000000002)\n T, distances, iterations = icp(synthetic_pts[sub_idcs_syn], real_depth_pts[sub_idcs_real],\n tolerance=0.0000005, verbose=verbose, depth_only=depth_only, no_depth=no_depth)\n\n if no_depth == True:\n angle, _, _ = transform.rotation_from_matrix(T)\n if np.abs(angle) > angle_change_limit:\n T = np.eye(4)\n\n H_est = np.zeros((4, 4))\n # R_est, t_est is from model to camera\n H_est[3, 3] = 1\n H_est[:3, 3] = t_est\n H_est[:3, :3] = R_est\n\n H_est_refined = np.dot(T, H_est)\n\n R_refined = H_est_refined[:3, :3]\n t_refined = H_est_refined[:3, 3]\n\n return (R_refined, t_refined)\n","repo_name":"zju3dv/clean-pvnet","sub_path":"lib/utils/icp_utils.py","file_name":"icp_utils.py","file_ext":"py","file_size_in_byte":8918,"program_lang":"python","lang":"en","doc_type":"code","stars":363,"dataset":"github-code","pt":"28"}
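best_fit_transform recovers a rigid motion exactly from noiseless one-to-one correspondences (the Kabsch/SVD step above), which gives a cheap sanity check; the import path below matches this repo's layout and may need adjusting:

    import numpy as np
    from lib.utils.icp_utils import best_fit_transform

    rng = np.random.RandomState(0)
    A = rng.rand(100, 3)
    th = np.deg2rad(10.0)
    R_true = np.array([[np.cos(th), -np.sin(th), 0.0],
                       [np.sin(th),  np.cos(th), 0.0],
                       [0.0,         0.0,        1.0]])
    t_true = np.array([0.1, -0.2, 0.05])
    B = A.dot(R_true.T) + t_true                    # b_i = R_true @ a_i + t_true

    T, R, t = best_fit_transform(A, B)
    assert np.allclose(R, R_true, atol=1e-8)
    assert np.allclose(t, t_true, atol=1e-8)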
+{"seq_id":"3040356908","text":"import sys\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QPushButton, QWidget\nclass WebWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"PyQt加载网站示例\")\n\n self.web_view = QWebEngineView()\n self.load_button = QPushButton(\"加载网站\")\n self.load_button.clicked.connect(self.load_website)\n\n layout = QVBoxLayout()\n layout.addWidget(self.web_view)\n layout.addWidget(self.load_button)\n\n central_widget = QWidget()\n central_widget.setLayout(layout)\n\n self.setCentralWidget(central_widget)\n\n def load_website(self):\n self.web_view.setUrl(QtCore.QUrl(\"https://www.google.com\")) # 使用QtCore.QUrl导入\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n window = WebWindow()\n window.show()\n\n sys.exit(app.exec_()) #!!! 重要 卡死\n","repo_name":"alphandbelt/PythonKeepLearning","sub_path":"Plans/20230808/qt_network.py","file_name":"qt_network.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"21976229925","text":"#Este código mostra quantas vogais tem em uma tupla.\r\n#Eric Peneres Carneiro#\r\n\r\np = ('Teste', 'Auto', 'Carta', 'Intensivo', 'Brando')\r\ncont = 0\r\n\r\nfor i in p:\r\n print (f'\\nNa palavra {i} tem: ')\r\n for l in i:\r\n if l in 'aeiou':\r\n print (l, end = ' ')","repo_name":"EricW900/Curso-de-Python-do-Curso-em-Video-","sub_path":"Exercicios_Guanabara/Mundo 3/077_vogais.py","file_name":"077_vogais.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20248354237","text":"# https://practice.geeksforgeeks.org/problems/kth-smallest-element5635/1\n\nfrom heapq import heappop, heappush, heapify\nclass Solution:\n def kthSmallest(self,arr, l, r, k):\n '''\n arr : given array\n l : starting index of the array i.e 0\n r : ending index of the array i.e size-1\n k : find kth smallest element and return using this function\n '''\n \n heap=[]\n heapify(heap)\n \n for i in range(l,r+1):\n # pushing -1*element, so as to make it a max heap\n # and size of heap should not exceed k\n # so after traversing the whole array,\n # the top element of the heap will give kth smallest\n # element.\n \n heappush(heap, -1*arr[i])\n \n if len(heap)>k:\n heappop(heap)\n \n ans=heappop(heap)\n return ans*-1\n \n# Time Complexity: O(n*logk)\n# Space Complexity: O(k+1)\n","repo_name":"Muskan0/DSA-Interview-Questions","sub_path":"heap/kth_smallest_element.py","file_name":"kth_smallest_element.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"28"}
+{"seq_id":"4303212460","text":"from utils import plot_examples\nfrom albumentations import augmentations\nimport cv2\nimport albumentations as A\nimport numpy as np\n# from PIL import Image\n\nimage = cv2.imread('./images/cat.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nbboxes = [[13, 170, 224, 410]]\n\n# There are different types of bounding boxes\n# the one used above is the Pascal_voc --> (x_min, y_min, x_max, y_max)\n# Other object detections have different formats ---> YOLO, COCO\n\ntransform = A.Compose(\n [\n A.Resize(width=1920, height=1080),\n A.RandomCrop(width=1280, height=720),\n A.Rotate(limit=35, border_mode=cv2.BORDER_CONSTANT),\n A.HorizontalFlip(),\n A.VerticalFlip(p=0.1),\n A.RGBShift(r_shift_limit=25, g_shift_limit=35, b_shift_limit=25),\n A.OneOf([\n A.ColorJitter(p=0.5),\n A.Blur(blur_limit=3, p=0.5)\n ], p=0.1)\n ], bbox_params=A.BboxParams(format=\"pascal_voc\", min_area=2048,\n label_fields=[], min_visibility=0.3)\n)\n\nimg_list = [image]\n# we don't need to convert the image to numpy array, as we used opencv\nsaved_bboxes = [bboxes[0]]\n\nfor i in range(15):\n augmentations = transform(image=image, bboxes=bboxes)\n augmented_img = augmentations[\"image\"]\n\n if len(augmentations['bboxes']) == 0:\n continue\n\n img_list.append(augmented_img)\n saved_bboxes.append(augmentations['bboxes'][0])\n\n\n# plotting the results\nplot_examples(img_list, bboxes=saved_bboxes)\n","repo_name":"amanosan/Pytorch-Tutorials","sub_path":"Albumentations for Data Augmentation/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"29086056109","text":"from data_manager import DataManager\nfrom flight_search import FlightSearch\nfrom flight_data import FlightData\nfrom notification_manager import NotificationManager\n\n\nFLY_FROM_CODE_DEFAULT = \"YYZ\"\nDATE_FROM_DEFAULT = \"01/02/2023\"\nDATE_TO_DEFAULT = \"01/08/2023\"\nMAX_STOPOVERS = 0\n\n\ndef hasACheaperPrice(googleSheetFlight, responseLowestFlightPrice):\n return float(responseLowestFlightPrice) < float(googleSheetFlight)\n\n\ndef getTheMostCheapFlightFromApiResponse(listOfFlights):\n cheapestFlight = listOfFlights[0]\n lowest_price = cheapestFlight['price']\n\n for flight in listOfFlights:\n if flight['price'] < lowest_price:\n cheapestFlight = flight\n lowest_price = flight['price']\n return cheapestFlight\n\n\ndef main():\n # Initiate a new DataManager Object and fetch Data from Google sheet.\n dataManager = DataManager()\n flightData = FlightData()\n flightSearch = FlightSearch()\n notificationManager = NotificationManager()\n\n googleSheetFlights = dataManager.retrieveDataFromGoogleSheet()\n\n # Go through Each flightnfrom Google sheet\n for googleSheetFlight in googleSheetFlights:\n\n flyToCode = googleSheetFlight['iataCode']\n try:\n flightsResponse = flightSearch.fetchDataFromFlightSearchApi(\n FLY_FROM_CODE_DEFAULT, flyToCode, DATE_FROM_DEFAULT, DATE_TO_DEFAULT)\n flightsFormattedData = flightData.structureFlightDataFromSearchFlightResponse(\n flightsResponse)\n if len(flightsFormattedData) <= 0:\n raise Exception(\"Not data found\")\n except Exception as e:\n print(e)\n else:\n cheapestFlight = getTheMostCheapFlightFromApiResponse(\n flightsFormattedData)\n # Compare if the \"lowestPrice\" from API is lower than current price from Google sheet\n if hasACheaperPrice(googleSheetFlight['lowestPrice'], cheapestFlight['price']):\n # Send SMS to user\n messageBody = notificationManager.getDataFromDict(\n cheapestFlight)\n notificationManager.SendSMS(messageBody)\n print(\"Message has been sent\")\n else:\n print(\"No cheapest flight found\")\n\n\nmain()\n","repo_name":"narvaezfb/Flight-Deal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"1706778641","text":"def dash(dt):\n dsh = dt.lett_count('-')\n if dsh == 1:\n return 1\n return 2\n\n\ndata = input()\nusername, language, points = data.split(\"-\")\nexam_data = {}\n\nsubmissions = {}\nresult = 2\nwhile not data == \"exam finished\":\n if result == 2:\n username, language, points = data.split(\"-\")\n points = int(points)\n if username not in exam_data:\n exam_data[username] = {\"username\": username, \"language\": language, \"points\": points}\n if language not in submissions:\n submissions[language] = 1\n else:\n submissions[language] += 1\n\n else:\n digit_points = exam_data[username][\"points\"]\n digit_points = int(digit_points)\n if points > digit_points:\n exam_data[username] = {\"username\": username, \"language\": language, \"points\": points}\n submissions[language] += 1\n else: # user has been banned\n username, language = data.split(\"-\")\n del exam_data[username] # delete the user from exam data\n\n data = input()\n result = dash(data) # check if user-banned command has been received\n\nsorted_results = sorted(exam_data.items(), key=lambda kvp: (-kvp[1][\"points\"], kvp[1][\"username\"]))\nsorted_sumbissions = sorted(submissions.items(), key=lambda kvp: (-kvp[1], kvp[0]))\n\nprint(\"Results:\")\nfor user, value in sorted_results:\n print(f\"{user} | {value['points']}\")\nprint(\"Submissions:\")\nfor lang, digit in sorted_sumbissions:\n print(f\"{lang} - {digit}\")","repo_name":"vasetousa/Python-fundamentals","sub_path":"lists_dictionary/SoftUni Exam Results.py","file_name":"SoftUni Exam Results.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"28002174322","text":"\nimport akshare as ak\nimport pandas as pd\n\nfrom minitrade.datasource.base import QuoteSource\n\n# Need to manually copy libmini_racer.dylib on Mac M1, see https://github.com/sqreen/PyMiniRacer/issues/143\n\n\nclass EastMoneyQuoteSource(QuoteSource):\n '''EastMoney data source'''\n\n def _daily_bar(self, ticker, start, end) -> pd.DataFrame:\n df: pd.DataFrame = ak.stock_zh_a_hist(\n symbol=ticker, period=\"daily\",\n start_date=start.replace('-', ''),\n end_date=end.replace('-', '') if end else '20500101', # magic number used in ak lib\n adjust='qfq')\n df = df.rename(columns={'日期': 'dt', '开盘': 'Open', '收盘': 'Close', '最高': 'High', '最低': 'Low', '成交量': 'Volume'})\n df['dt'] = pd.to_datetime(df['dt'], format=\"%Y-%m-%d\")\n df = df.set_index('dt')\n df = df.tz_localize('Asia/Shanghai')\n df = df[['Open', 'High', 'Low', 'Close', 'Volume']]\n return df\n","repo_name":"ruifeng96150/minitrade","sub_path":"minitrade/datasource/eastmoney.py","file_name":"eastmoney.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"28"}
+{"seq_id":"20072611155","text":"from pytorch_quik import ddp\n\n\ndef test_tq_bar():\n size = 5\n pbar = ddp.tq_bar(size)\n for _ in range(size):\n pbar.update()\n pbar.close()\n del pbar\n\n\ndef test_find_free_port():\n port = ddp.find_free_port()\n assert isinstance(port, int)\n","repo_name":"donchesworth/pytorch-quik","sub_path":"pytorch_quik/tests/test_ddp.py","file_name":"test_ddp.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"45015571597","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0004_remove_userprofile_cohort'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='case',\n name='hierarchy',\n field=models.ForeignKey(to='pagetree.Hierarchy'),\n ),\n ]\n","repo_name":"ccnmtl/uelc","sub_path":"uelc/main/migrations/0005_auto_20141126_1425.py","file_name":"0005_auto_20141126_1425.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"10705630266","text":"import boto3\r\nimport base64\r\nfrom botocore.exceptions import BotoCoreError, ClientError\r\n\r\ndef get_secret(secret_name, region_name):\r\n session = boto3.session.Session()\r\n client = session.client(\r\n service_name='secretsmanager',\r\n region_name=region_name\r\n )\r\n\r\n try:\r\n get_secret_value_response = client.get_secret_value(\r\n SecretId=secret_name\r\n )\r\n except ClientError as e:\r\n raise Exception(\"Couldn't retrieve the secret\") from e\r\n else:\r\n if 'SecretString' in get_secret_value_response:\r\n return get_secret_value_response['SecretString']\r\n else:\r\n return base64.b64decode(get_secret_value_response['SecretBinary'])\r\n\r\ndef lambda_handler(event, context):\r\n secret = get_secret('MyAPIKey', 'us-west-2')\r\n print(f\"API Key: {secret}\")\r\n # Now use the secret (API Key) to make your API request\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'body': json.dumps('Hello from Lambda!')\r\n }\r\n","repo_name":"Ethycs/aws_secrets_service","sub_path":"aws_gpt4_lambda_snippet.py","file_name":"aws_gpt4_lambda_snippet.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"70460490636","text":"from . import testutils\nif __name__ == \"__main__\":\n testutils.setup_environment(\"../../../proto\")\nimport pkg5unittest\n\nimport errno\nimport os\nimport re\nimport unittest\n\nimport pkg.misc as misc\nfrom pkg.client.pkgdefs import *\n\nclass TestPkgChangeVariant(pkg5unittest.SingleDepotTestCase):\n # Only start/stop the depot once (instead of for every test)\n persistent_setup = True\n\n pkg_i386 = \"\"\"\n open pkg_i386@1.0,5.11-0\n add set name=variant.arch value=i386\n add dir mode=0755 owner=root group=bin path=/shared\n add dir mode=0755 owner=root group=bin path=/unique\n add file tmp/pkg_i386/shared/pkg_i386_shared mode=0555 owner=root group=bin path=shared/pkg_arch_shared variant.arch=i386\n add file tmp/pkg_i386/unique/pkg_i386 mode=0555 owner=root group=bin path=unique/pkg_i386 variant.arch=i386\n close\"\"\"\n\n pkg_sparc = \"\"\"\n open pkg_sparc@1.0,5.11-0\n add set name=variant.arch value=sparc\n add dir mode=0755 owner=root group=bin path=/shared\n add dir mode=0755 owner=root group=bin path=/unique\n add file tmp/pkg_sparc/shared/pkg_sparc_shared mode=0555 owner=root group=bin path=shared/pkg_arch_shared variant.arch=sparc\n add file tmp/pkg_sparc/unique/pkg_sparc mode=0555 owner=root group=bin path=unique/pkg_sparc variant.arch=sparc\n close\"\"\"\n\n pkg_shared = \"\"\"\n open pkg_shared@1.0,5.11-0\n add set name=variant.arch value=sparc value=i386 value=zos\n add set name=variant.opensolaris.zone value=global value=nonglobal\n add dir mode=0755 owner=root group=bin path=/shared\n add dir mode=0755 owner=root group=bin path=/unique\n add file tmp/pkg_shared/shared/common mode=0555 owner=root group=bin path=shared/common\n add file tmp/pkg_shared/shared/pkg_shared_i386 mode=0555 owner=root group=bin path=shared/pkg_shared variant.arch=i386\n add file tmp/pkg_shared/shared/pkg_shared_sparc mode=0555 owner=root group=bin path=shared/pkg_shared variant.arch=sparc\n add file tmp/pkg_shared/shared/global_motd mode=0555 owner=root group=bin path=shared/zone_motd variant.opensolaris.zone=global\n add file tmp/pkg_shared/shared/nonglobal_motd mode=0555 owner=root group=bin path=shared/zone_motd variant.opensolaris.zone=nonglobal\n add file tmp/pkg_shared/unique/global mode=0555 owner=root group=bin path=unique/global variant.opensolaris.zone=global\n add file tmp/pkg_shared/unique/nonglobal mode=0555 owner=root group=bin path=unique/nonglobal variant.opensolaris.zone=nonglobal\n close\"\"\"\n\n pkg_unknown = \"\"\"\n open unknown@1.0\n add set name=variant.unknown value=bar value=foo\n add file tmp/bar path=usr/bin/bar mode=0755 owner=root group=root variant.unknown=bar\n add file tmp/foo path=usr/bin/foo mode=0755 owner=root group=root variant.unknown=foo\n close\n open unknown@2.0\n add set name=variant.unknown value=bar value=foo\n add file tmp/bar path=usr/bin/foobar mode=0755 owner=root group=root variant.unknown=bar\n add file tmp/foo path=usr/bin/foobar mode=0755 owner=root group=root variant.unknown=foo\n close\n open unknown-boolean@1.0\n add set name=variant.unknown value=true value=false\n add file tmp/bar path=usr/bin/bar mode=0755 owner=root group=root variant.unknown=false\n add file tmp/foo path=usr/bin/foo mode=0755 owner=root group=root variant.unknown=true\n close\n open unknown-boolean@2.0\n add set name=variant.unknown value=true value=false\n add file tmp/bar path=usr/bin/foobar mode=0755 owner=root group=root variant.unknown=false\n add file tmp/foo path=usr/bin/foobar mode=0755 owner=root group=root 
variant.unknown=true\n close\n \"\"\"\n\n # this package intentionally has no variant.arch specification.\n pkg_inc = \"\"\"\n open pkg_inc@1.0,5.11-0\n add depend fmri=pkg_i386@1.0,5.11-0 type=incorporate\n add depend fmri=pkg_sparc@1.0,5.11-0 type=incorporate\n add depend fmri=pkg_shared@1.0,5.11-0 type=incorporate\n close\"\"\"\n\n pkg_cluster = \"\"\"\n open pkg_cluster@1.0,5.11-0\n add set name=variant.arch value=sparc value=i386 value=zos\n add depend fmri=pkg_i386@1.0,5.11-0 type=require variant.arch=i386\n add depend fmri=pkg_sparc@1.0,5.11-0 type=require variant.arch=sparc\n add depend fmri=pkg_shared@1.0,5.11-0 type=require\n close\"\"\"\n\n pkg_list_all = set([\n \"pkg_i386\",\n \"pkg_sparc\",\n \"pkg_shared\",\n \"pkg_inc\",\n \"pkg_cluster\"\n ])\n\n misc_files = [\n \"tmp/pkg_i386/shared/pkg_i386_shared\",\n \"tmp/pkg_i386/unique/pkg_i386\",\n\n \"tmp/pkg_sparc/shared/pkg_sparc_shared\",\n \"tmp/pkg_sparc/unique/pkg_sparc\",\n\n \"tmp/pkg_shared/shared/common\",\n \"tmp/pkg_shared/shared/pkg_shared_i386\",\n \"tmp/pkg_shared/shared/pkg_shared_sparc\",\n \"tmp/pkg_shared/shared/global_motd\",\n \"tmp/pkg_shared/shared/nonglobal_motd\",\n \"tmp/pkg_shared/unique/global\",\n \"tmp/pkg_shared/unique/nonglobal\",\n\n \"tmp/bar\",\n \"tmp/foo\"\n ]\n\n def setUp(self):\n pkg5unittest.SingleDepotTestCase.setUp(self)\n\n self.make_misc_files(self.misc_files)\n self.pkgsend_bulk(self.rurl, (self.pkg_i386, self.pkg_sparc,\n self.pkg_shared, self.pkg_inc, self.pkg_cluster,\n self.pkg_unknown))\n\n # verify pkg search indexes\n self.verify_search = True\n\n # verify installed images before changing variants\n self.verify_install = False\n\n def __assert_variant_matches_tsv(self, expected, errout=None,\n exit=0, opts=misc.EmptyI, names=misc.EmptyI, su_wrap=False):\n self.pkg(\"variant {0} -H -F tsv {1}\".format(\" \".join(opts),\n \" \".join(names)), exit=exit, su_wrap=su_wrap)\n self.assertEqualDiff(expected, self.output)\n if errout:\n self.assertTrue(self.errout != \"\")\n else:\n self.assertEqualDiff(\"\", self.errout)\n\n def f_verify(self, path, token=None, negate=False):\n \"\"\"Verify that the specified path exists and contains\n the specified token. If negate is true, then make sure\n the path doesn't either doesn't exist, or if it does that\n it doesn't contain the specified token.\"\"\"\n\n file_path = os.path.join(self.get_img_path(), path)\n\n try:\n f = open(file_path)\n except IOError as e:\n if e.errno == errno.ENOENT and negate:\n return\n raise\n\n if negate and not token:\n self.assertTrue(False,\n \"File exists when it shouldn't: {0}\".format(path))\n\n token_re = re.compile(\n \"^\" + token + \"$\" \\\n \"|^\" + token + \"[/_]\" \\\n \"|[/_]\" + token + \"$\" \\\n \"|[/_]\" + token + \"[/_]\")\n\n found = False\n for line in f:\n if token_re.search(line):\n found = True\n break\n f.close()\n\n if not negate and not found:\n self.assertTrue(False, \"File {0} ({1}) does not contain {2}\".format(\n path, file_path, token))\n if negate and found:\n self.assertTrue(False, \"File {0} ({1}) contains {2}\".format(\n path, file_path, token))\n\n def p_verify(self, p=None, v_arch=None, v_zone=None, negate=False):\n \"\"\"Given a specific architecture and zone variant, verify\n the contents of the specified within an image. If\n negate is true then verify that the package isn't\n installed, and that actions delivered by the package\n don't exist in the target image.\n\n This routine has hard coded knowledge of the test package\n names, variants, and dependancies. 
So any updates made\n to the test package will also likely required updates to\n this function.\"\"\"\n\n assert p != None\n assert v_arch == 'i386' or v_arch == 'sparc' or v_arch == 'zos'\n assert v_zone == 'global' or v_zone == 'nonglobal'\n\n # make sure the package is installed\n if negate:\n self.pkg(\"list {0}\".format(p), exit=1)\n else:\n self.pkg(\"list {0}\".format(p))\n self.pkg(\"verify {0}\".format(p))\n\n # nothing to verify for packages with no content\n if p == 'pkg_inc':\n return\n if p == 'pkg_cluster':\n return\n\n # verify package contents\n if p == 'pkg_i386':\n assert negate or v_arch == 'i386'\n self.f_verify(\"shared/pkg_arch_shared\", \"i386\", negate)\n self.f_verify(\"unique/pkg_i386\", \"i386\", negate)\n return\n elif p == 'pkg_sparc':\n assert negate or v_arch == 'sparc'\n self.f_verify(\"shared/pkg_arch_shared\", \"sparc\", negate)\n self.f_verify(\"unique/pkg_sparc\", \"sparc\", negate)\n return\n elif p == 'pkg_shared':\n self.f_verify(\"shared/common\", \"common\", negate)\n self.f_verify(\"shared/pkg_shared\", v_arch, negate)\n self.f_verify(\"shared/zone_motd\", v_zone, negate)\n if negate:\n self.f_verify(\"unique/global\", v_zone, True)\n self.f_verify(\"unique/nonglobal\", v_zone, True)\n elif v_zone == 'global':\n self.f_verify(\"unique/global\", v_zone, False)\n self.f_verify(\"unique/nonglobal\", v_zone, True)\n elif v_zone == 'nonglobal':\n self.f_verify(\"unique/global\", v_zone, True)\n self.f_verify(\"unique/nonglobal\", v_zone, False)\n return\n\n # NOTREACHED\n assert False\n\n def i_verify(self, v_arch=None, v_zone=None, pl=None):\n \"\"\"Given a specific architecture variant, zone variant,\n and package list, verify that the variant settings are\n correct for the current image, and that the image\n contains the specified packages. Also verify that the\n image doesn't contain any other unexpected packages.\n\n This routine has hard coded knowledge of the test package\n names, variants, and dependancies. 
So any updates made\n to the test package will also likely required updates to\n this function.\"\"\"\n\n assert v_arch == 'i386' or v_arch == 'sparc' or v_arch == 'zos'\n assert v_zone == 'global' or v_zone == 'nonglobal'\n\n if pl == None:\n pl = []\n\n # verify the variant settings\n ic = self.get_img_api_obj().img.cfg\n if \"variant.arch\" not in ic.variants:\n self.assertTrue(False,\n \"unable to determine image arch variant\")\n if ic.variants[\"variant.arch\"] != v_arch:\n self.assertTrue(False,\n \"unexpected arch variant: {0} != {1}\".format(\n ic.variants[\"variant.arch\"], v_arch))\n\n if \"variant.opensolaris.zone\" not in ic.variants:\n self.assertTrue(False,\n \"unable to determine image zone variant\")\n if ic.variants[\"variant.opensolaris.zone\"] != v_zone:\n self.assertTrue(False, \"unexpected zone variant\")\n\n\n # adjust the package list based on known dependancies.\n if 'pkg_cluster' in pl and 'pkg_shared' not in pl:\n pl.append('pkg_shared')\n if v_arch == 'i386':\n if 'pkg_cluster' in pl and 'pkg_i386' not in pl:\n pl.append('pkg_i386')\n elif v_arch == 'sparc':\n if 'pkg_cluster' in pl and 'pkg_sparc' not in pl:\n pl.append('pkg_sparc')\n\n #\n # Make sure the number of packages installed matches the\n # number of packages in pl.\n #\n self.pkg(\n \"list -H | wc -l | nawk '{{print $1'}} | grep '^{0:d}$'\".format(\n len(pl)))\n\n # make sure each specified package is installed\n for p in pl:\n self.p_verify(p, v_arch, v_zone)\n\n for p in (self.pkg_list_all - set(pl)):\n self.p_verify(p, v_arch, v_zone, negate=True)\n\n # make sure that pkg search doesn't report corrupted indexes\n if self.verify_search:\n for p in pl:\n self.pkg(\"search -l {0}\".format(p))\n\n def cv_test(self, v_arch, v_zone, pl, v_arch2, v_zone2, pl2,\n rv=EXIT_OK):\n \"\"\" test if change-variant works \"\"\"\n\n assert v_arch == 'i386' or v_arch == 'sparc' or v_arch == 'zos'\n assert v_arch2 == 'i386' or v_arch2 == 'sparc' or \\\n v_arch2 == 'zos'\n assert v_zone == 'global' or v_zone == 'nonglobal'\n assert v_zone2 == 'global' or v_zone2 == 'nonglobal'\n\n # create an image\n variants = {\n \"variant.arch\": v_arch,\n \"variant.opensolaris.zone\": v_zone\n }\n self.image_create(self.rurl, variants=variants)\n\n exp_tsv = \"\"\"\\\nvariant.arch\\t{0}\nvariant.opensolaris.zone\\t{1}\n\"\"\".format(v_arch, v_zone)\n self.__assert_variant_matches_tsv(exp_tsv)\n\n # install the specified packages into the image\n ii_args = \"\"\n for p in pl:\n ii_args += \" {0} \".format(p)\n self.pkg(\"install {0}\".format(ii_args))\n\n # if we're paranoid, then verify the image we just installed\n if self.verify_install:\n self.i_verify(v_arch, v_zone, pl)\n # change the specified variant\n cv_args = \"\"\n cv_args += \" -v\"\n cv_args += \" variant.arch={0}\".format(v_arch2)\n cv_args += \" variant.opensolaris.zone={0}\".format(v_zone2)\n\n self.pkg(\"change-variant\" + cv_args, exit=rv)\n # verify the updated image\n self.i_verify(v_arch2, v_zone2, pl2)\n\n exp_tsv = \"\"\"\\\nvariant.arch\\t{0}\nvariant.opensolaris.zone\\t{1}\n\"\"\".format(v_arch2, v_zone2)\n self.__assert_variant_matches_tsv(exp_tsv)\n\n self.image_destroy()\n\n def test_cv_01_none_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_cluster\"],\n \"i386\", \"global\", [\"pkg_cluster\"], rv=EXIT_NOP)\n\n def test_cv_01_none_2(self):\n self.cv_test(\"i386\", \"nonglobal\", [\"pkg_cluster\"],\n \"i386\", \"nonglobal\", [\"pkg_cluster\"], rv=EXIT_NOP)\n\n def test_cv_01_none_3(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_cluster\"],\n 
\"sparc\", \"global\", [\"pkg_cluster\"], rv=EXIT_NOP)\n\n def test_cv_01_none_4(self):\n self.cv_test(\"sparc\", \"nonglobal\", [\"pkg_cluster\"],\n \"sparc\", \"nonglobal\", [\"pkg_cluster\"], rv=EXIT_NOP)\n\n def test_cv_02_arch_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_shared\"],\n \"sparc\", \"global\", [\"pkg_shared\"])\n\n def test_cv_02_arch_2(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_shared\"],\n \"i386\", \"global\", [\"pkg_shared\"])\n\n def test_cv_03_arch_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_inc\"],\n \"sparc\", \"global\", [\"pkg_inc\"])\n\n def test_cv_03_arch_2(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_inc\"],\n \"i386\", \"global\", [\"pkg_inc\"])\n\n def test_cv_04_arch_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_i386\"],\n \"sparc\", \"global\", [])\n\n def test_cv_04_arch_2(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_sparc\"],\n \"i386\", \"global\", [])\n\n def test_cv_05_arch_1(self):\n self.cv_test(\"i386\", \"global\",\n [\"pkg_i386\", \"pkg_shared\", \"pkg_inc\"],\n \"sparc\", \"global\", [\"pkg_shared\", \"pkg_inc\"])\n\n def test_cv_05_arch_2(self):\n self.cv_test(\"sparc\", \"global\",\n [\"pkg_sparc\", \"pkg_shared\", \"pkg_inc\"],\n \"i386\", \"global\", [\"pkg_shared\", \"pkg_inc\"])\n\n def test_cv_06_arch_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_cluster\"],\n \"sparc\", \"global\", [\"pkg_cluster\"])\n\n def test_cv_06_arch_2(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_cluster\"],\n \"i386\", \"global\", [\"pkg_cluster\"])\n\n def test_cv_07_arch_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_cluster\", \"pkg_inc\"],\n \"sparc\", \"global\", [\"pkg_cluster\", \"pkg_inc\"])\n\n def test_cv_07_arch_2(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_cluster\", \"pkg_inc\"],\n \"i386\", \"global\", [\"pkg_cluster\", \"pkg_inc\"])\n\n def test_cv_08_zone_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_cluster\"],\n \"i386\", \"nonglobal\", [\"pkg_cluster\"])\n\n def test_cv_08_zone_2(self):\n self.cv_test(\"i386\", \"nonglobal\", [\"pkg_cluster\"],\n \"i386\", \"global\", [\"pkg_cluster\"])\n\n def test_cv_09_zone_1(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_cluster\"],\n \"sparc\", \"nonglobal\", [\"pkg_cluster\"])\n\n def test_cv_09_zone_2(self):\n self.cv_test(\"sparc\", \"nonglobal\", [\"pkg_cluster\"],\n \"sparc\", \"global\", [\"pkg_cluster\"])\n\n def test_cv_10_arch_and_zone_1(self):\n self.cv_test(\"i386\", \"global\", [\"pkg_cluster\"],\n \"sparc\", \"nonglobal\", [\"pkg_cluster\"])\n\n def test_cv_10_arch_and_zone_2(self):\n self.cv_test(\"sparc\", \"nonglobal\", [\"pkg_cluster\"],\n \"i386\", \"global\", [\"pkg_cluster\"])\n\n def test_cv_11_arch_and_zone_1(self):\n self.cv_test(\"i386\", \"nonglobal\", [\"pkg_cluster\"],\n \"sparc\", \"global\", [\"pkg_cluster\"])\n\n def test_cv_11_arch_and_zone_2(self):\n self.cv_test(\"sparc\", \"global\", [\"pkg_cluster\"],\n \"i386\", \"nonglobal\", [\"pkg_cluster\"])\n\n def test_cv_12_unknown(self):\n \"\"\"Ensure that packages with an unknown variant and\n non-conflicting content can be installed and subsequently\n altered using change-variant.\"\"\"\n\n self.image_create(self.rurl)\n\n # First test if unknown variant doesn't have the values of\n # true/false.\n\n # Install package with unknown variant and verify both files\n # are elided.\n self.pkg(\"install -v unknown@1.0\")\n for fname in (\"bar\", \"foo\"):\n self.f_verify(\"usr/bin/{0}\".format(fname), fname,\n negate=True)\n\n # Next, verify 
upgrade to version of package with unknown\n # variant won't fail if new version delivers conflicting content\n # and variant has not been set.\n self.pkg(\"update -vvv unknown@2.0\")\n # And the file is still elided.\n for fname in (\"bar\", \"foo\"):\n self.f_verify(\"usr/bin/foobar\", fname, negate=True)\n\n # Next, set unknown variant explicitly and verify content\n # changes as expected.\n self.pkg(\"change-variant unknown=foo\")\n # Verify that foo variant of foobar is now installed.\n self.f_verify(\"usr/bin/foobar\", \"foo\")\n self.f_verify(\"usr/bin/foobar\", \"bar\", negate=True)\n\n self.image_destroy()\n self.image_create(self.rurl)\n\n # Now test if unknown variant has the values of true/false.\n\n # Install package with unknown variant and verify that false\n # variant of content is installed since unknown variants have a\n # default value of 'false'.\n self.pkg(\"install -vvv unknown-boolean@1.0\")\n self.f_verify(\"usr/bin/bar\", \"bar\")\n self.f_verify(\"usr/bin/foo\", \"foo\", negate=True)\n\n # Next, verify upgrade to version of package with unknown\n # variant won't fail if new version delivers conflicting content\n # and variant has not been set.\n self.pkg(\"update -vvv unknown-boolean@2.0\")\n # And that false variant of content is installed.\n self.f_verify(\"usr/bin/foobar\", \"bar\")\n self.f_verify(\"usr/bin/foobar\", \"foo\", negate=True)\n\n # Next, set unknown variant explicitly and verify content\n # changes as expected. First test with uppercase 'True'.\n self.pkg(\"change-variant unknown=True\")\n # Verify that true variant of foobar is now installed.\n self.f_verify(\"usr/bin/foobar\", \"foo\")\n self.f_verify(\"usr/bin/foobar\", \"bar\", negate=True)\n # Now test with lowercase 'true'. Need to set the variant to\n # some other value first, otherwise it shows nothing to do.\n self.pkg(\"change-variant unknown=false\")\n self.pkg(\"change-variant unknown=true\")\n # Verify that true variant of foobar is now installed.\n self.f_verify(\"usr/bin/foobar\", \"foo\")\n self.f_verify(\"usr/bin/foobar\", \"bar\", negate=True)\n\n def test_cv_13_parsable(self):\n \"\"\"Test the parsable output of change-variant.\"\"\"\n\n self.image_create(self.rurl, variants={\n \"variant.arch\": \"i386\",\n \"variant.opensolaris.zone\": \"nonglobal\"\n })\n self.pkg(\"change-variant --parsable=0 variant.arch=sparc \"\n \"variant.opensolaris.zone=global\")\n self.assertEqualParsable(self.output, change_variants=[\n [\"variant.arch\", \"sparc\"],\n [\"variant.opensolaris.zone\", \"global\"]])\n self.pkg(\"change-variant --parsable=0 variant.arch=i386\")\n self.assertEqualParsable(self.output, change_variants=[\n [\"variant.arch\", \"i386\"]])\n\n def test_cv_14_invalid_variant(self):\n \"\"\"Test that invalid input is handled appropriately\"\"\"\n\n self.image_create(self.rurl, variants={\n \"variant.arch\": \"i386\",\n \"variant.opensolaris.zone\": \"nonglobal\"\n })\n self.pkg(\"install pkg_shared\")\n self.pkg(\"change-variant variant.opensolaris.zone=bogus\")\n\n def test_cv_15_invalid_variant_name(self):\n \"\"\"Test that invalid variant names are handled appropriately\"\"\"\n\n self.image_create(self.rurl)\n # This should pass because there are no illegal characters\n self.pkg(\"change-variant --no-refresh \"\n \"variant.foobar=false\", exit=0)\n # Variant names contain space, should raise an exception\n self.pkg(\"change-variant --no-refresh \"\n \"variant.foo\\ bar=false variant.bar\\ foo=false\", exit=1)\n self.assertTrue(\"variant.foo bar\" and \"variant.bar foo\"\n in 
self.errout)\n\n\nclass TestPkgChangeVariantPerTestRepo(pkg5unittest.SingleDepotTestCase):\n \"\"\"A separate test class is needed because these tests modify packages\n after they've been published and need to avoid corrupting packages for\n other tests.\"\"\"\n\n # Only start/stop the depot once (instead of for every test)\n persistent_setup = False\n # Tests in this suite use the read only data directory.\n need_ro_data = True\n\n pkg_shared = \"\"\"\n open pkg_shared@1.0,5.11-0\n add set name=variant.arch value=sparc value=i386 value=zos\n add set name=variant.opensolaris.zone value=global value=nonglobal\n add dir mode=0755 owner=root group=bin path=/shared\n add dir mode=0755 owner=root group=bin path=/unique\n add file tmp/pkg_shared/shared/common mode=0555 owner=root group=bin path=shared/common\n add file tmp/pkg_shared/shared/pkg_shared_i386 mode=0555 owner=root group=bin path=shared/pkg_shared variant.arch=i386\n add file tmp/pkg_shared/shared/pkg_shared_sparc mode=0555 owner=root group=bin path=shared/pkg_shared variant.arch=sparc\n add file tmp/pkg_shared/shared/global_motd mode=0555 owner=root group=bin path=shared/zone_motd variant.opensolaris.zone=global\n add file tmp/pkg_shared/shared/nonglobal_motd mode=0555 owner=root group=bin path=shared/zone_motd variant.opensolaris.zone=nonglobal\n add file tmp/pkg_shared/unique/global mode=0555 owner=root group=bin path=unique/global variant.opensolaris.zone=global\n add file tmp/pkg_shared/unique/nonglobal mode=0555 owner=root group=bin path=unique/nonglobal variant.opensolaris.zone=nonglobal\n\n close\"\"\"\n\n misc_files = [\n \"tmp/pkg_shared/shared/common\",\n \"tmp/pkg_shared/shared/pkg_shared_i386\",\n \"tmp/pkg_shared/shared/pkg_shared_sparc\",\n \"tmp/pkg_shared/shared/global_motd\",\n \"tmp/pkg_shared/shared/nonglobal_motd\",\n \"tmp/pkg_shared/unique/global\",\n \"tmp/pkg_shared/unique/nonglobal\"\n ]\n\n def setUp(self):\n pkg5unittest.SingleDepotTestCase.setUp(self)\n\n self.make_misc_files(self.misc_files)\n self.pkgsend_bulk(self.rurl, self.pkg_shared)\n\n def test_change_variants_with_changed_manifest(self):\n \"\"\"Test that if a package is installed but its manifest has\n changed in the repository, change variants doesn't use the\n changes.\"\"\"\n\n self.image_create(self.rurl, variants={\n \"variant.arch\": \"i386\",\n \"variant.opensolaris.zone\": \"nonglobal\"\n })\n self.seed_ta_dir(\"ta3\")\n self.pkg(\"install pkg_shared\")\n self.pkg(\"set-property signature-policy require-signatures\")\n self.pkg(\"change-variant variant.arch=sparc\", exit=1)\n\n # Specify location as filesystem path.\n self.pkgsign_simple(self.dc.get_repodir(), \"pkg_shared\")\n\n self.pkg(\"change-variant variant.arch=sparc\", exit=1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"oracle/solaris-ips","sub_path":"src/tests/cli/t_change_variant.py","file_name":"t_change_variant.py","file_ext":"py","file_size_in_byte":24880,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"28"}
+{"seq_id":"41301913528","text":"import sys\r\n\r\npainted = dict()\r\nglobal odd\r\nglobal xx\r\nglobal yy\r\nglobal dd\r\nodd = True\r\nxx = 0\r\nyy = 0\r\ndd = 0\r\ndirs = [(-1, 0), (0, 1), (1, 0), (0, -1)]\r\npainted[(0,0)] = 1\r\n\r\ndef output(step):\r\n global odd\r\n global xx\r\n global yy\r\n global dd\r\n if odd:\r\n painted[(xx,yy)] = step\r\n else:\r\n dd = (dd+(1 if step == 1 else 3))%4\r\n xx += dirs[dd][0]\r\n yy += dirs[dd][1]\r\n odd = not odd\r\n\r\ndef get_input():\r\n if (xx,yy) in painted:\r\n return painted[(xx,yy)]\r\n else:\r\n return 0\r\n\r\nfor line in sys.stdin:\r\n x = list(map(int, line.strip().split(',')))\r\n inp = 2\r\n ptr = 0\r\n relbase = 0\r\n paramlen = [0, 3, 3, 1, 1, 2, 2, 3, 3, 1]\r\n def gett(y, loc, pmode):\r\n if pmode == 0:\r\n pos = y[loc]\r\n elif pmode == 1:\r\n return y[loc]\r\n else:\r\n pos = y[loc]+relbase\r\n if pos >= len(y):\r\n y.extend([0] * (pos-len(y)+1))\r\n return y[pos]\r\n def sett(y, loc, pmode, val):\r\n if pmode == 0:\r\n pos = y[loc]\r\n else:\r\n pos = y[loc]+relbase\r\n if pos >= len(y):\r\n y.extend([0] * (pos-len(y)+1))\r\n y[pos] = val\r\n while x[ptr] % 100 != 99:\r\n cmd = x[ptr] % 100\r\n details = x[ptr] // 100\r\n params = []\r\n for i in range(paramlen[cmd]):\r\n val = details % 10\r\n details //= 10\r\n params.append(val)\r\n if cmd == 1:\r\n r = gett(x, ptr+1, params[0]) + gett(x, ptr+2, params[1])\r\n sett(x, ptr+3, params[2], r)\r\n ptr += 4\r\n elif cmd == 2:\r\n r = gett(x, ptr+1, params[0]) * gett(x, ptr+2, params[1])\r\n sett(x, ptr+3, params[2], r)\r\n ptr += 4\r\n elif cmd == 3:\r\n inp = get_input()\r\n sett(x, ptr+1, params[0], inp)\r\n ptr += 2\r\n elif cmd == 4:\r\n output(gett(x, ptr+1, params[0]))\r\n ptr += 2\r\n elif cmd == 5:\r\n if gett(x, ptr+1, params[0]) != 0:\r\n ptr = gett(x, ptr+2, params[1])\r\n else:\r\n ptr += 3\r\n elif cmd == 6:\r\n if gett(x, ptr+1, params[0]) == 0:\r\n ptr = gett(x, ptr+2, params[1])\r\n else:\r\n ptr += 3\r\n elif cmd == 7:\r\n r = 1 if gett(x, ptr+1, params[0]) < gett(x, ptr+2, params[1]) else 0\r\n sett(x, ptr+3, params[2], r)\r\n ptr += 4\r\n elif cmd == 8:\r\n r = 1 if gett(x, ptr+1, params[0]) == gett(x, ptr+2, params[1]) else 0\r\n sett(x, ptr+3, params[2], r)\r\n ptr += 4\r\n elif cmd == 9:\r\n relbase += gett(x, ptr+1, params[0])\r\n ptr += 2\r\n\r\npaintedx = [x for (x,y) in painted]\r\npaintedy = [y for (x,y) in painted]\r\nmx = min(paintedx)\r\nmy = min(paintedy)\r\npx = max(paintedx)\r\npy = max(paintedy)\r\nboard = []\r\nfor i in range(px-mx+1):\r\n board.append([0]*(py-my+1))\r\nfor (x,y) in painted:\r\n board[x-mx][y-my] = painted[(x,y)]\r\nprint(len(painted))\r\nfor line in board:\r\n print(''.join(map(lambda x: '#' if x == 1 else ' ', line)))","repo_name":"twattanawaroon/adventofcode","sub_path":"2019/q11b.py","file_name":"q11b.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"28676091960","text":"'''\n英文を単語ごとに分かち書きし,\nweblioからそれぞれの単語のレベルを取得(スクレイピング)した後に、\n合計レベル / 単語数 で平均を求める\n'''\nimport requests\nfrom bs4 import BeautifulSoup\n\n# 英文\ncontent = 'Camber Pharmaceuticals, Inc. recalled 87 lots of the blood pressure \\\n medication losartan on Thursday after discovering trace amounts of a \\\n potential carcinogen. The recalled \\\n 25 mg, 50 mg and 100 mg tablets contained small amounts of N-Nitroso \\\n N-Methyl 4-amino butyric acid, or NMBA, according to a company'\n\nsearch_list = []\ncontent = content.split(' ')\n\nfor w in content:\n if len(w) >= 4: # 単語が4文字以上かどうか\n if w[0] != w[0].upper(): # 単語の文頭が大文字かどうか(固有名詞は除く)\n if w.isalpha(): # 単語が数字かどうか(数字は除く)\n search_list.append(w)\n\n# print(search_list)\n\ntmp_url = 'https://ejje.weblio.jp/content/'\nword_cnt = 0\nlevel_sum = 0\nfor w in search_list:\n url = tmp_url + w\n html = requests.get(url).content\n soup = BeautifulSoup(html, \"html.parser\")\n\n all_level = soup.find_all('span', class_='learning-level-content')\n\n if all_level:\n word_cnt += 1\n level = all_level[0].contents[0]\n level_sum += int(level)\n # print(level)\n\nprint(int(level_sum / word_cnt))\n","repo_name":"kons16/SelectNews","sub_path":"hinshi.py","file_name":"hinshi.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"15839925666","text":"\n'''\nSame as m31_gausspy_testguesses_subcubes.py but now on the\nhigh-res cube with 1.2 km/s channels.\n'''\n\nimport os\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.utils.console import ProgressBar\nfrom spectral_cube import SpectralCube\nimport matplotlib.pyplot as plt\n\n\nfrom AGD_decomposer import AGD_loop\nfrom thickHI_model import fit_isoturbHI_model_simple\n\n\nrepo_path = os.path.expanduser(\"~/ownCloud/project_code/ThickHIFitting/\")\n\nconstants_script = os.path.join(repo_path, \"paths.py\")\nexec(compile(open(constants_script, \"rb\").read(), constants_script, 'exec'))\n\n# model_script = os.path.join(repo_path, \"thickHI_model.py\")\n# exec(compile(open(model_script, \"rb\").read(), model_script, 'exec'))\n\nfrom glob import glob\n\nosjoin = os.path.join\n\n\nfifteenAcubes = glob(fifteenA_HI_BC_1_2kms_data_wEBHIS_path(\"braun09_subcubes\") + \"/*.fits\")\n\nfifteenAcubes.sort()\n\nnoise_val = 4.8 * u.K\n\n# Change to data directory\nos.chdir(fifteenA_HI_BC_1_2kms_data_wEBHIS_path(\"braun09_subcubes\", no_check=True))\n\nfor i, subcube_name in enumerate(fifteenAcubes[::-1]):\n\n # if i != 0:\n # continue\n\n subcube_filename = os.path.split(subcube_name)[-1][:-5]\n\n subcube = SpectralCube.read(subcube_name)\n\n err_arr = noise_val * np.ones(subcube.shape[1:])\n\n chan_width = np.diff(subcube.spectral_axis)[0]\n\n peaktemp = subcube.max(axis=0)\n vcent = subcube.moment1()\n\n peakchans = subcube.argmax(axis=0)\n peakvels = np.take_along_axis(subcube.spectral_axis[:, np.newaxis,\n np.newaxis],\n peakchans[np.newaxis, :, :], 0)\n peakvels = peakvels.squeeze()\n peakvels = peakvels.to(u.km / u.s)\n\n # peak_name = fifteenA_HI_BCtaper_wEBHIS_HI_file_dict['PeakTemp']\n # peaktemp = Projection.from_hdu(fits.open(peak_name))\n\n # vcent_name = fourteenA_wEBHIS_HI_file_dict['Moment1']\n # vcent = Projection.from_hdu(fits.open(vcent_name)).to(u.km / u.s)\n\n # Restrict number of positions to fit.\n # 2-sigma limit (see if this is alright)\n mask_peak = peaktemp >= 2 * noise_val\n # Must have 5 channels above half the peak, following Braun+09\n mask_halfabovepeak = (subcube.filled_data[:] > 2 * noise_val).sum(0) > 5\n\n mask_positions = np.where(np.logical_and(mask_peak,\n mask_halfabovepeak))\n\n # Try ordering by peak temperature.\n # mask_positions = np.unravel_index(np.argsort(peaktemp.value.ravel())[::-1],\n # peaktemp.shape)\n\n # Parameters for the fit output\n thickHI_params = np.zeros((4,) + peaktemp.shape)\n thickHI_uncerts = np.zeros((4,) + peaktemp.shape)\n\n thickHI_model_cube = np.zeros(subcube.shape)\n\n max_gauss_comps = 10\n multigauss_params = np.zeros((3 * max_gauss_comps,) + peaktemp.shape)\n multigauss_uncerts = np.zeros((3 * max_gauss_comps,) + peaktemp.shape)\n\n multigauss_comps = np.zeros(peaktemp.shape)\n\n multigauss_model_cube = np.zeros(subcube.shape)\n\n fit_bics = np.zeros((2,) + peaktemp.shape)\n\n show_plots = False\n # show_plots = True\n\n pbar = ProgressBar(len(mask_positions[0]))\n\n for y, x in zip(mask_positions[0], mask_positions[1]):\n\n pbar.update()\n # print(f\"{y}, {x}\")\n\n spec = subcube[:, y, x].with_spectral_unit(u.km / u.s)\n\n # Fit that spectrum.\n\n thickHI_fit, vels, thickHI_fit_model = \\\n fit_isoturbHI_model_simple(spec.spectral_axis, # [spec_mask],\n spec, # [spec_mask],\n peakvels[y, x],\n err=noise_val,\n delta_vcent=10 * u.km / u.s,\n verbose=show_plots,\n plot_fit=show_plots,\n use_emcee=False,\n return_model=True,\n emcee_kwargs={'nwalkers': 4 * 10,\n 'burn': 2000,\n 
'steps': 10000,\n 'workers': 4})\n\n if show_plots:\n plt.draw()\n input(\"?\")\n plt.close()\n\n agd_kwargs = {\"plot\": show_plots,\n \"verbose\": show_plots,\n \"SNR_thresh1\": 5.,\n \"SNR_thresh2\": 5.,\n \"SNR2_thresh1\": 4.,\n \"SNR2_thresh2\": 3.5,\n \"mode\": \"conv\",\n # \"mode\": \"python\",\n \"deblend\": True,\n \"intermediate_fit\": False,\n \"perform_final_fit\": False,\n \"component_sigma\": 5.}\n\n alphas = [2., 5., 10., 15., 20., 30., 50.]\n\n multigauss_fit, vels, multigauss_fit_model = \\\n AGD_loop(spec,\n noise_val.value * np.ones_like(spec.value),\n alphas,\n **agd_kwargs)\n\n assert thickHI_fit.success is True\n assert multigauss_fit.success is True\n\n # plt.draw()\n\n # input(\"?\")\n\n # plt.close()\n\n thickHI_model_cube[:, y, x] = thickHI_fit_model\n multigauss_model_cube[:, y, x] = multigauss_fit_model\n multigauss_comps[y, x] = len(multigauss_fit.params) // 3\n\n thickHI_params[:, y, x] = [thickHI_fit.params[par].value\n for par in thickHI_fit.params]\n if hasattr(thickHI_fit, 'covar'):\n thickHI_uncerts[:, y, x] = [thickHI_fit.params[par].stderr for par\n in thickHI_fit.params]\n else:\n thickHI_uncerts[:, y, x] = np.NaN\n\n for ii, par in enumerate(multigauss_fit.params):\n multigauss_params[ii, y, x] = multigauss_fit.params[par].value\n if hasattr(multigauss_fit, 'covar'):\n multigauss_uncerts[ii, y, x] = multigauss_fit.params[par].stderr\n else:\n multigauss_uncerts[ii, y, x] = np.NaN\n\n fit_bics[0, y, x] = thickHI_fit.bic\n fit_bics[1, y, x] = multigauss_fit.bic\n\n if show_plots:\n plt.draw()\n input(f\"{y} {x}\")\n # plt.close('all')\n plt.clf()\n\n print(argh)\n","repo_name":"e-koch/HI-LineShapes","sub_path":"subcube_tests/m31_gausspy_testguesses_subcubes_Bconfig.py","file_name":"m31_gausspy_testguesses_subcubes_Bconfig.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"33712170159","text":"import torch\nfrom transformers import BertTokenizer\nfrom tqdm import tqdm\nimport random\nimport json\n\n\nclass GPT2DataLoader:\n def __init__(\n self, corpus_path: str, tokenizer: BertTokenizer, seq_len=100,\n stride: int = None, batch_size=64, encoding=\"utf-8\", corpus_lines=None, on_memory=True\n ):\n \"\"\"\n 实现从语料库中获取某一batch数据\n :param corpus_path: 文本预料所在位置\n :param tokenizer: 分词器,使用bert tokenizer\n :param seq_len: 最长的序列长度\n :param stride: 步长\n :param batch_size:batch大小,默认设置64\n :param encoding: 默认\"utf-8\"\n :param corpus_lines: 提供语料行数,默认为None\n :param on_memory: 是否将预料全部存储于内存中\n \"\"\"\n self.tokenizer = tokenizer\n self.seq_len = seq_len\n self.stride = stride\n self.on_memory = on_memory\n self.corpus_lines = corpus_lines\n self.corpus_path = corpus_path\n self.encoding = encoding\n self.batch_size = batch_size\n\n with open(corpus_path, \"r\", encoding=encoding) as f:\n if self.corpus_lines is None and not on_memory:\n for _ in tqdm(f, desc=\"Loading Dataset\", total=corpus_lines):\n self.corpus_lines += 1\n\n if on_memory:\n self.lines = []\n for line in tqdm(f, desc=\"Loading Data\", total=corpus_lines):\n # json 文件预处理, 其他文件做相应处理即可\n json_line = json.loads(line)\n self.lines.append(\n \"关键词:\" + json_line[\"keywords\"].replace(\" \", \"\").strip() + \"\\t\" + json_line[\"content\"].strip()\n )\n self.corpus_lines = len(self.lines)\n\n if not on_memory:\n self.file = open(corpus_path, \"r\", encoding=encoding)\n self.random_file = open(corpus_path, \"r\", encoding=encoding)\n\n for _ in range(random.randrange(self.corpus_lines if self.corpus_lines < 1000 else 1000)):\n self.random_file.__next__()\n\n self.get_trans_dataset() # shuffle\n self.steps = len(self.lines) // batch_size\n\n def __len__(self):\n return self.steps\n\n def __getitem__(self, item):\n # 取一个batch的原始中文诗句\n batch_data = self.lines[item * self.batch_size: (item + 1) * self.batch_size]\n\n # 计算最大长度, add 3是因为[CLS]+s1+[SEP]+s2+[SEP]\n max_len = max([len(s.replace(\"\\t\", \"\")) for s in batch_data]) + 3\n tokens, targets, labels, segments, attn_masks = [], [], [], [], []\n\n for i in range(len(batch_data)):\n data_i = batch_data[i].split(\"\\t\") # data_i = [keywords, poem]\n key_token, key_target, key_label = self.random_word(data_i[0], None)\n poet_token, poet_target, poet_label = self.random_word(data_i[1], keywords=data_i[0])\n # 加入 [CLS] and [SEP]\n # token是mask过的原句子ids, target是原句子的ids, label是关注的mask单词\n key_token = [self.tokenizer.cls_token_id] + key_token + [self.tokenizer.eos_token_id]\n key_target = [self.tokenizer.cls_token_id] + key_target + [self.tokenizer.eos_token_id]\n key_label = [0] + key_label + [0] # 不是需要预测的token, label=0\n poet_token = poet_token + [self.tokenizer.sep_token_id]\n poet_target = poet_target + [self.tokenizer.sep_token_id]\n poet_label = poet_label + [0] # 不是需要预测的token, label=0\n # 设置token_type_ids\n segment = [0 for _ in range(len(key_token))] + [1 for _ in range(len(poet_token))]\n # set attention mask\n attn_mask = [1 for _ in range(len(key_token) + len(poet_token))]\n\n padding = [self.tokenizer.pad_token_id for _ in range(max_len - len(key_token) - len(poet_token))]\n tokens_i = key_token + poet_token + padding\n target_i = key_target + poet_target + padding\n labels_i = key_label + poet_label + padding\n segment.extend(padding)\n attn_mask += [0 for _ in range(len(padding))]\n\n assert len(tokens_i) == len(target_i) == len(labels_i) == len(segment) == len(attn_mask) == max_len, \"长度有错!\"\n\n tokens.append(tokens_i)\n 
targets.append(target_i)\n labels.append(labels_i)\n segments.append(segment)\n attn_masks.append(attn_mask)\n\n del key_token, key_target, key_label, poet_token, poet_target, poet_label # 清除内存\n\n tokens, targets = torch.tensor(tokens, dtype=torch.int64), torch.tensor(targets, dtype=torch.int64)\n labels, segments = torch.tensor(labels, dtype=torch.int64), torch.tensor(segments, dtype=torch.int64)\n attn_masks = torch.tensor(attn_masks, dtype=torch.int64)\n return tokens, targets, labels, segments, attn_masks\n\n def sentence2ids(self, sentence):\n tokens = self.tokenizer.tokenize(sentence)\n ids = self.tokenizer.convert_tokens_to_ids(tokens)\n return ids\n\n def get_trans_dataset(self):\n # 暂停使用原方案, 仅shuffle\n random.shuffle(self.lines)\n\n def get_corpus_line(self, item):\n if self.on_memory:\n return self.lines[item]\n else:\n line = self.file.__next__()\n while len(line) >= self.seq_len:\n line = self.file.__next__()\n if line is not None:\n self.file.close()\n self.file = open(self.corpus_path, \"r\", encoding=self.encoding)\n line = self.file.__next__()\n return line\n\n def random_word(self, sentence: str, keywords: str = None):\n \"\"\" :return mask_token_ids, token_ids, labels\"\"\"\n tokens = list(sentence.replace(\" \", \"\").strip())\n keywords = set(list(keywords.replace(\" \", \"\").strip())) if keywords is not None else None # 关键字\n targets, labels = [], []\n for i, token in enumerate(tokens):\n prob = random.random()\n if prob < 0.15:\n # 设置mask的概率为15%\n tokens[i] = self.tokenizer.mask_token_id # 设置该token为[MASK]\n targets.append(self.tokenizer.vocab.get(token, self.tokenizer.unk_token_id))\n if (keywords is not None and token in keywords) or token == \"|\":\n # 关键词的mask,是我们关心的信息\n labels.append(self.tokenizer.vocab.get(token, self.tokenizer.unk_token_id))\n else:\n labels.append(0)\n else:\n # 不满足mask情况\n tokens[i] = self.tokenizer.vocab.get(token, self.tokenizer.unk_token_id)\n targets.append(self.tokenizer.vocab.get(token, self.tokenizer.unk_token_id))\n if token == \"|\":\n labels.append(self.tokenizer.vocab.get(token, self.tokenizer.unk_token_id))\n else:\n labels.append(0)\n return tokens, targets, labels\n\n\nclass T5DataLoader:\n def __init__(\n self, corpus_path: str, tokenizer: BertTokenizer, seq_len=100,\n stride: int = None, batch_size=64, encoding=\"utf-8\", corpus_lines=None, on_memory=True\n ):\n \"\"\"\n 实现从语料库中获取某一batch数据\n :param corpus_path: 文本预料所在位置\n :param tokenizer: 分词器,使用bert tokenizer\n :param seq_len: 最长的序列长度\n :param stride: 步长\n :param batch_size:batch大小,默认设置64\n :param encoding: 默认\"utf-8\"\n :param corpus_lines: 提供语料行数,默认为None\n :param on_memory: 是否将预料全部存储于内存中\n \"\"\"\n self.tokenizer = tokenizer\n self.seq_len = seq_len\n self.stride = stride\n self.on_memory = on_memory\n self.corpus_lines = corpus_lines\n self.corpus_path = corpus_path\n self.encoding = encoding\n self.batch_size = batch_size\n\n self.lines = []\n self._create() # 导入数据\n\n random.shuffle(self.lines)\n self.steps = len(self.lines) // batch_size\n\n def __len__(self):\n return self.steps\n\n def __getitem__(self, item):\n # 取一个batch的原始中文诗句\n batch_data = self.lines[item * self.batch_size: (item + 1) * self.batch_size]\n len_in = max([len(s[0].replace(\" \", \"\").replace(\"[EOS]\", \"\")) for s in batch_data]) + 6\n len_out = max([len(s[1].replace(\" \", \"\").replace(\"[EOS]\", \"\")) for s in batch_data]) + 3\n # len_in, len_out = max([len(s[0]) for s in batch_data]) + 2, max([len(s[1]) for s in batch_data]) + 2\n tokens, targets, attns_x, attns_y = [], [], [], []\n for i 
in range(self.batch_size):\n x = self.tokenizer.encode(batch_data[i][0])\n attn_x = [1 for _ in range(len(x))]\n attn_x += [0 for _ in range(len_in - len(x))]\n x.extend([self.tokenizer.pad_token_id for _ in range(len_in - len(x))])\n\n y = self.tokenizer.encode(batch_data[i][1])\n attn_y = [1 for _ in range(len(y))]\n attn_y += [0 for _ in range(len_out - len(y))]\n y += [self.tokenizer.pad_token_id for _ in range(len_out - len(y))]\n # 保存\n assert len(x) == len(attn_x) == len_in and len(y) == len(attn_y) == len_out, \"长度不匹配!\"\n tokens.append(x)\n targets.append(y)\n attns_x.append(attn_x)\n attns_y.append(attn_y)\n tokens, targets = torch.tensor(tokens, dtype=torch.int64), torch.tensor(targets, dtype=torch.int64)\n attns_x, attns_y = torch.tensor(attns_x, dtype=torch.int64), torch.tensor(attns_y, dtype=torch.int64)\n # ids = self.tokenizer(text=batch_input, text_target=batch_target, padding=True, return_tensors=\"pt\")\n return tokens, targets, attns_x, attns_y\n\n def _create(self):\n with open(self.corpus_path, \"r\", encoding=self.encoding) as f:\n if self.on_memory:\n self.lines = []\n for line in tqdm(f, desc=\"Loading Data\", total=self.corpus_lines):\n json_line = json.loads(line)\n keywords = json_line[\"keywords\"].strip()\n poems = json_line[\"content\"].strip().split(\"|\") # 列表,一个元素是一句诗\n for i in range(len(poems)):\n x = \"关键词:\" + keywords + \" [EOS] \"\n self.lines.append((x + \" [EOS] \".join(poems[:i]) + \" [EOS] \", poems[i] + \" [EOS] \"))\n # self.lines.append((x + \"|\".join(poems[:i]), \"|\".join(poems[i:]))) # \\t分割输入和预测输出\n # self.lines = self.lines[-200000:]\n self.corpus_lines = len(self.lines)\n","repo_name":"weiji-Feng/Image2Poem","sub_path":"data_process/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10969,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"28"}
+{"seq_id":"11514281922","text":"import sys\n\nclass Queue(object):\n\n def __init__(self):\n self.my_queue = []\n\n def enqueue(self,val):\n self.my_queue.append(val)\n \n def dequeue(self):\n if self.my_queue:\n self.my_queue.pop(0)\n else:\n print(\"Your queue is empty......\")\n \n def show(self):\n print(self.my_queue)\n\n def exit(self):\n sys.exit()\n\nqe = Queue()\n\nwhile True:\n print(\"----------------------------------------------------\")\n print(\n \"Enqueue ---------- 1\\n\",\n \"Dequeue ---------- 2 \\n\",\n \"Show ---------- 3\\n\",\n \"Exit ---------- 4\\n\"\n )\n n = int(input(\"Choose above option : \"))\n\n if n == 1:\n ps_no = int(input(\"Enter number : \"))\n qe.enqueue(ps_no)\n elif n == 2:\n qe.dequeue()\n elif n == 3:\n qe.show()\n elif n == 4:\n qe.exit()\n\n\n# A simple implementation of Priority Queue\n# using Queue.\nclass PriorityQueue(object):\n\tdef __init__(self):\n\t\tself.queue = []\n\n\tdef __str__(self):\n\t\treturn ' '.join([str(i) for i in self.queue])\n\n\t# for checking if the queue is empty\n\tdef isEmpty(self):\n\t\treturn len(self.queue) == 0\n\n\t# for inserting an element in the queue\n\tdef insert(self, data):\n\t\tself.queue.append(data)\n\n\t# for popping an element based on Priority\n\tdef delete(self):\n\t\ttry:\n\t\t\tmax = 0\n\t\t\tfor i in range(len(self.queue)):\n\t\t\t\tif self.queue[i] > self.queue[max]:\n\t\t\t\t\tmax = i\n\t\t\titem = self.queue[max]\n\t\t\tdel self.queue[max]\n\t\t\treturn item\n\t\texcept IndexError:\n\t\t\tprint()\n\t\t\texit()\n\nif __name__ == '__main__':\n\tmyQueue = PriorityQueue()\n\tmyQueue.insert(12)\n\tmyQueue.insert(1)\n\tmyQueue.insert(14)\n\tmyQueue.insert(7)\n\tprint(myQueue)\t\t\t\n\twhile not myQueue.isEmpty():\n\t\tprint(myQueue.delete())","repo_name":"Pradip369/Core-Python","sub_path":"DSA/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"10338652478","text":"import logging\nimport re\nimport sys\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(\"pre_gen_project\")\n\n\ndef validate_plugin_name(name):\n matcher = re.compile(r\"^(?!tox)[-_a-zA-Z][-_a-zA-Z0-9]+$\")\n if not re.match(matcher, name):\n log.error('Invalid value for plugin_name \"%s\"', name)\n log.info(\"Must match %s\", matcher.pattern)\n log.info('Do not prepend plugin_name with \"tox\"!')\n sys.exit(1)\n\n\ndef validate_py_name(which, name):\n matcher = re.compile(r\"^[a-z][_a-z0-9]+$\")\n if not re.match(matcher, name):\n url = \"https://python.org/dev/peps/pep-0008/#package-and-module-names\"\n log.error('%s \"%s\" not PEP-8 compliant - see %s', which, name, url)\n log.info(\"Must match %s\", matcher.pattern)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n validate_plugin_name(\"{{cookiecutter.plugin_name}}\")\n validate_py_name(\"pkg_name\", \"{{cookiecutter.pkg_name}}\")\n validate_py_name(\"module_name\", \"{{cookiecutter.module_name}}\")\n","repo_name":"tox-dev/cookiecutter-tox-plugin","sub_path":"hooks/pre_gen_project.py","file_name":"pre_gen_project.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"}
+{"seq_id":"3970841521","text":"import os\nimport warnings\nfrom collections import namedtuple\n\nimport cv2\nimport numpy as np\nimport torch\nfrom scipy.ndimage.interpolation import rotate, zoom\nfrom torch.utils.data import Dataset\n\nwarnings.filterwarnings('ignore', '.*output shape of zoom.*')\n\n\nclass ClassifierDataset(Dataset):\n def __init__(self, records_path, config, phase='train', load_all_ln=False):\n assert (phase == 'train' or phase == 'val' or phase == 'test')\n # note: 不必再通过索引值访问tuple 可以看作一个字典通过名字进行访问 但其中的值是不能改变的\n # Item = namedtuple('Item', ['case_name', 'image_path', 'box', 'partition', 'is_nodule', 'left_or_right'])\n Item = namedtuple('Item', ['case_name', 'image_path', 'box', 'partition', 'scaling_binary_lable'])\n self.crop_size = config['crop_size']\n self.stride = config['stride']\n self.augtype = config['augtype']\n self.blacklist = config['blacklist']\n self.phase = phase\n self.items = []\n\n # data_root = config['data_root']\n data_root = config['processed_data_root']\n # 加载数据名称\n records = np.load(records_path)\n if phase != 'test':\n records = [f for f in records if (f not in self.blacklist)]\n for idx, record in enumerate(records):\n filename = os.path.join(data_root, '{}_clean.npy'.format(record))\n # get [z, y, x, d, p, is_positive, left_or_right]\n # get [x, y, z, d, p, scaling]\n labels = np.load(os.path.join(data_root, '{}_label.npy'.format(record)), allow_pickle=True)\n # if not load_all_ln:\n # # 筛选带有阴阳性标注的标签\n # labels = labels[~np.isnan(labels[:, 5])]\n # for label in labels:\n # left_or_right = 0 if np.isnan(label[6]) else label[6]\n # item = Item(\n # case_name=record, image_path=filename, box=label[0:4], partition=label[4], is_nodule=label[5],\n # left_or_right=left_or_right)\n # self.items.append(item)\n # else:\n # # 排除分区为空的淋巴结\n # labels = labels[~np.isnan(labels[:, 4])]\n # # 排除分区为歧义候选和结直肠系膜的淋巴结\n # labels = labels[(labels[:, 4] != 5) & (labels[:, 4] != 6)]\n # for label in labels:\n # # 如果未标注阴阳性,视为阴淋巴结\n # is_nodule = 0 if np.isnan(label[5]) else label[5]\n # left_or_right = 0 if np.isnan(label[6]) else label[6]\n # item = Item(\n # case_name=record, image_path=filename, box=label[0:4], partition=label[4], is_nodule=is_nodule,\n # left_or_right=left_or_right)\n # self.items.append(item)\n if not load_all_ln:\n for label in labels:\n item = Item(\n case_name=record, image_path=filename, box=label[0:4], partition=label[4], scaling_binary_lable=label[6])\n self.items.append(item)\n else:\n # 排除分区为空的淋巴结\n labels = labels[~np.isnan(labels[:, 4])]\n # 排除分区为歧义候选和结直肠系膜的淋巴结\n labels = labels[(labels[:, 4] != 21) & (labels[:, 4] < 14) & (labels[:, 4] > 19)]\n for label in labels:\n item = Item(\n case_name=record, image_path=filename, box=label[0:4], partition=label[4], scaling_binary_lable=label[6])\n self.items.append(item)\n\n self.crop = Crop(config, phase)\n\n def __getitem__(self, idx):\n if self.phase != 'test':\n item = self.items[idx]\n img = np.load(item.image_path)\n target = item.box\n # is_nodule = int(item.is_nodule)\n scaling_binary_lable = int(item.scaling_binary_lable)\n crop_img, mask_img = self.crop(img, target)\n input = np.concatenate([crop_img, mask_img], axis=0)\n input = (input.astype(np.float32) - 128) / 128\n if self.phase == 'train':\n input = augment(input,\n is_flip=self.augtype['flip'], is_rotate=self.augtype['rotate'],\n is_swap=self.augtype['swap'])\n\n # 处理掩码经过增广后的非整数值\n input[1] = np.round(input[1])\n sample = {\n 'image': torch.from_numpy(input).float(),\n 'target': scaling_binary_lable\n # 'target': is_nodule\n 
}\n return sample\n else:\n item = self.items[idx]\n img = np.load(item.image_path)\n target = item.box\n # is_nodule = int(item.is_nodule)\n scaling_binary_lable = int(item.scaling_binary_lable)\n partition = int(item.partition)\n # left_or_right = int(item.left_or_right)\n case_name = item.case_name\n crop_img, mask_img = self.crop(img, target)\n input = np.concatenate([crop_img, mask_img], axis=0)\n input = (input.astype(np.float32) - 128) / 128\n # 处理掩码经过增广后的非整数值\n input[1] = np.round(input[1])\n sample = {\n 'image': torch.from_numpy(input).float(),\n 'target': scaling_binary_lable,\n # 'target': is_nodule,\n 'box': target,\n 'partition': partition,\n 'case_name': case_name\n # 'left_or_right': left_or_right\n }\n return sample\n\n def __len__(self):\n return len(self.items)\n\n\nclass Crop():\n def __init__(self, config, phase):\n self.crop_size = config['crop_size']\n self.scaleLim = config['scaleLim']\n self.radiusLim = config['radiusLim']\n self.jitter_range = config['jitter_range']\n self.isScale = config['augtype']['scale'] and phase == 'train'\n self.stride = config['stride']\n self.filling_value = config['filling_value']\n self.phase = phase\n\n def __call__(self, imgs, target):\n if self.isScale:\n radiusLim = self.radiusLim\n scaleLim = self.scaleLim\n scaleRange = [np.min([np.max([(radiusLim[0] / target[3]), scaleLim[0]]), 1]),\n np.max([np.min([(radiusLim[1] / target[3]), scaleLim[1]]), 1])]\n scale = np.random.rand() * (scaleRange[1] - scaleRange[0]) + scaleRange[0]\n crop_size = (np.array(self.crop_size).astype('float') / scale).astype('int')\n else:\n crop_size = np.array(self.crop_size).astype('int')\n if self.phase == 'train':\n jitter_range = target[3] * self.jitter_range\n jitter = (np.random.rand(3) - 0.5) * jitter_range\n else:\n jitter = 0\n start = (target[:3] - crop_size / 2 + jitter).astype('int')\n pad = [[0, 0]]\n for i in range(3):\n if start[i] < 0:\n leftpad = -start[i]\n start[i] = 0\n else:\n leftpad = 0\n if start[i] + crop_size[i] > imgs.shape[i + 1]:\n rightpad = start[i] + crop_size[i] - imgs.shape[i + 1]\n else:\n rightpad = 0\n pad.append([leftpad, rightpad])\n imgs = np.pad(imgs, pad, 'constant', constant_values=self.filling_value)\n crop = imgs[:,\n start[0]:start[0] + crop_size[0],\n start[1]:start[1] + crop_size[1],\n start[2]:start[2] + crop_size[2]]\n # generate target mask image\n new_target = target.copy()\n new_target[0:3] = target[0:3] - start\n target_img = generate_mask(new_target, crop_size)\n\n if self.isScale:\n crop = zoom(crop, [1, scale, scale, scale], order=1)\n target_img = zoom(target_img, [1, scale, scale, scale], order=1)\n newpad = self.crop_size[0] - crop.shape[1:][0]\n if newpad < 0:\n crop = crop[:, :-newpad, :-newpad, :-newpad]\n target_img = target_img[:, :-newpad, :-newpad, :-newpad]\n elif newpad > 0:\n pad2 = [[0, 0], [0, newpad], [0, newpad], [0, newpad]]\n crop = np.pad(crop, pad2, 'constant', constant_values=self.filling_value)\n target_img = np.pad(target_img, pad2, 'constant', constant_values=self.filling_value)\n\n return crop, target_img\n\n\ndef augment(sample, is_flip=True, is_rotate=True, is_swap=True):\n if is_rotate:\n angle = np.random.rand() * 180\n sample = rotate(sample, angle, axes=(2, 3), reshape=False)\n if is_swap:\n if sample.shape[1] == sample.shape[2] and sample.shape[1] == sample.shape[3]:\n # note: np.random.permutation生成随机序列\n axisorder = np.random.permutation(3)\n # note: np.transpose 默认情况下,反转维度,否则根据给定的值对轴进行排列\n sample = np.transpose(sample, np.concatenate([[0], axisorder + 1]))\n\n if 
is_flip:\n flipid = np.array([np.random.randint(2), np.random.randint(2), np.random.randint(2)]) * 2 - 1\n # note: 返回和传入的数组类似的内存中连续的数组\n sample = np.ascontiguousarray(sample[:, ::flipid[0], ::flipid[1], ::flipid[2]])\n return sample\n\n\ndef generate_mask(target, crop_size, r_margin=0):\n z, y, x, d = target\n # 淋巴结直径\n r = target[-1] / 2 + r_margin\n # 淋巴结z轴的范围\n lim = list(range(int(np.round(z - r)), int(np.round(z + r))))\n mask = np.zeros(crop_size, dtype='uint8')\n\n for idx in range(mask.shape[0]):\n if idx in lim:\n center = (int(round(x)), int(round(y)))\n if idx == z:\n radius = int(np.round(r))\n else:\n radius_content = np.round(r) ** 2 - np.abs(z - idx) ** 2\n if radius_content < 0:\n radius_content = 0\n radius = int(np.sqrt(radius_content))\n mask[idx] = cv2.circle(mask[idx], center, radius, (255, 255, 255), -1)\n\n # mask[mask == 255] = 1\n return mask[np.newaxis, ...]\n","repo_name":"shuaihuachen/MultiLNsMatch","sub_path":"datasets/classifier_dataset.py","file_name":"classifier_dataset.py","file_ext":"py","file_size_in_byte":10404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"24008011130","text":"import torch\n\nfrom transformers import BertTokenizer as TB_tokenizer\nfrom transformers import BertModel as TB_model\nfrom pytorch_pretrained_bert import BertModel as PB_model\nfrom pytorch_pretrained_bert import BertTokenizer as PB_tokenizer\n\nclass BertExtractorFromWWW(object):\n def __init__(self, cuda=False, cuda_num=None):\n self.tokenizer = TB_tokenizer.from_pretrained('/data2/lrc/bert_cache/pytorch')\n self.model = TB_model.from_pretrained('/data2/lrc/bert_cache/pytorch')\n self.model.eval()\n\n if cuda:\n self.cuda = True\n self.cuda_num = cuda_num\n self.model = self.model.cuda(self.cuda_num)\n else:\n self.cuda = False\n\n def extract(self, text):\n input_ids = torch.tensor(self.tokenizer.encode(text)).unsqueeze(0) # Batch size 1\n print('In TB:')\n print(input_ids[0])\n print(list(map(lambda x: self.tokenizer._convert_id_to_token(x.item()), input_ids[0])))\n if self.cuda:\n input_ids = input_ids.cuda(self.cuda_num)\n\n with torch.no_grad():\n outputs = self.model(input_ids)\n \n # last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n \n sequence_output = outputs[0]\n pooled_output = outputs[1]\n return sequence_output, pooled_output\n\n\nclass BertExtractor(object):\n def __init__(self, gpu_id=None):\n self.device = torch.device(f'cuda:{gpu_id}') if gpu_id is not None else None\n # self.model = BertModel.from_pretrained('bert-base-uncased') \n self.model = PB_model.from_pretrained('/data2/lrc/bert_cache/pytorch')\n self.model.to(self.device)\n self.model.eval()\n # self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n self.tokenizer = PB_tokenizer.from_pretrained('/data2/lrc/bert_cache/pytorch')\n \n def extract_feat(self, text):\n ids = self.bert_tokenize(text)\n ids = [101] + ids + [102]\n print('In PB:')\n print(ids)\n print(self.tokenizer.convert_ids_to_tokens(ids))\n\n ids = torch.tensor(ids).unsqueeze(0)\n if self.device:\n ids = ids.to(self.device)\n with torch.no_grad():\n feat = self.model(ids)[0][0]\n return feat.squeeze().cpu().numpy()\n\n def bert_tokenize(self, text):\n ids = []\n for word in text.strip().split():\n ws = self.tokenizer.tokenize(word)\n if not ws:\n # some special char\n continue\n ids.extend(self.tokenizer.convert_tokens_to_ids(ws))\n return ids\n \n\nif __name__ == '__main__':\n sentence = 'With the most perfect poise?'\n pb_model = BertExtractor(gpu_id=0)\n tb_model = BertExtractorFromWWW(cuda=True, cuda_num=1)\n print(\"Input sentence:\")\n print(sentence)\n pb_feat = pb_model.extract_feat(sentence)\n tb_feat, _ = tb_model.extract(sentence)\n tb_feat = tb_feat[0].cpu().numpy()\n print('pb:', pb_feat.shape)\n print('tb:', tb_feat.shape)\n # print(pb_feat==tb_feat)\n print('---------------')\n print(pb_feat)\n print('---------------')\n print(tb_feat)\n print('---------------')\n","repo_name":"AIM3-RUC/MEmoBert","sub_path":"preprocess/iemocap/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"}
+{"seq_id":"20537459217","text":"from CreditCard import Credit_Card\n\nif __name__ == '__main__':\n wallet = []\n wallet.append(Credit_Card('John Lee', 'DBS', '5391 0375 9387 5309', 2500))\n wallet.append(Credit_Card('John Lee', 'OCBC', '3485 0399 3395 1954', 3500))\n wallet.append(Credit_Card('John Lee', 'Maybank', '5391 0375 9387 5309', 5000))\n\n for val in range(1, 17):\n wallet[0].charge(val)\n wallet[1].charge(2*val)\n wallet[2].charge(3*val)\n\n for c in range(3):\n print('Customer = ', wallet[c].get_customer())\n print('Bank = ', wallet[c].get_bank())\n print('Account = ', wallet[c].get_account())\n print('Limit = ', wallet[c].get_limit())\n print('Balance = ', wallet[c].get_balance())\n\n while wallet[c].get_balance() > 100:\n wallet[c].make_payment(100)\n print('New balance = ', wallet[c].get_balance())\n\n print()","repo_name":"Xskullibur/Diploma-Data-Structures-and-Algorithm","sub_path":"Practice/Practical 1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"8213872585","text":"from tkinter import *\r\n\r\nclass MatrixCalculator:\r\n\r\n def __init__(self, master):\r\n self.master = master\r\n self.master.title('Calculate SVD and PD')\r\n self.master[\"bg\"] = '#F5EFF6'\r\n\r\n self.window_width = 500\r\n self.window_height = 500\r\n self.screen_width = self.master.winfo_screenwidth()\r\n self.screen_height = self.master.winfo_screenheight()\r\n self.x_position = (self.screen_width - self.window_width) // 2\r\n self.y_position = (self.screen_height - self.window_height) // 2\r\n\r\n self.master.geometry(f'{self.window_width}x{self.window_height}+{self.x_position}+{self.y_position}')\r\n\r\n self.welcome_label = Label(self.master, text='Welcome!', font=('Comic Sans Ms', 50), fg='#bb88d3', bg='#F5EFF6')\r\n self.welcome_label.place(x=125, y=90)\r\n\r\n self.start_button = Button(self.master, text='Start', font=('Comic Sans Ms', 15), fg='#bb88d3', bg='white', command=self.choose)\r\n self.start_button.place(x=200, y=180)\r\n\r\n self.result = StringVar()\r\n self.frame1 = None\r\n self.frame2 = None\r\n\r\n def go_back(self):\r\n if self.frame1:\r\n self.frame1.destroy()\r\n self.frame1 = None\r\n\r\n if self.frame2:\r\n self.frame2.place_forget()\r\n self.welcome_label.place(x=125, y=90)\r\n self.start_button.place(x=200, y=180)\r\n\r\n def calculate(self, method):\r\n\r\n def solve(self):\r\n pass\r\n\r\n if self.frame1:\r\n self.frame1.destroy()\r\n\r\n size = int(self.size_var.get()[0])\r\n entries = []\r\n\r\n cell_size = 50\r\n cell_padding = 10\r\n dx = 120\r\n dy = 150\r\n\r\n for i in range(size):\r\n for j in range(size):\r\n x = j * cell_size + cell_padding + dx\r\n y = i * cell_size + cell_padding + dy\r\n\r\n entry = Entry(self.frame2, width=3)\r\n entry.place(x=x, y=y)\r\n entries.append(entry)\r\n\r\n matrix_label = Label(self.frame2, text=f\"Calculate {method} for {size}x{size} matrix\", font=('Comic Sans Ms', 14), fg='#bb88d3', bg='#F5EFF6')\r\n matrix_label.place(x=40, y=300)\r\n\r\n calculate_button = Button(self.frame2, text=\"Calculate\", font=('Comic Sans Ms', 14), fg='#bb88d3', command=solve)\r\n calculate_button.place(x=150, y=350)\r\n\r\n\r\n\r\n def select(self, method):\r\n if self.frame1:\r\n self.frame1.destroy()\r\n\r\n self.result.set(f\"Performing {method}...\")\r\n\r\n self.frame2 = Frame(self.master, bg='#F5EFF6', width=500, height=500)\r\n self.frame2.place(x=50, y=20)\r\n\r\n select_size_label = Label(self.frame2, text='Please select size of matrix!', font=('Comic Sans Ms', 24), fg='#bb88d3', bg='#F5EFF6')\r\n select_size_label.place(x=30, y=30)\r\n\r\n self.size_var = StringVar(value=\"2x2\")\r\n\r\n option_menu = OptionMenu(self.frame2, self.size_var, \"2x2\", \"3x3\")\r\n option_menu.place(x=150, y=75)\r\n\r\n select1_button = Button(self.frame2, text=\"Select\", font=('Comic Sans Ms', 12), fg='#bb88d3', command=lambda: self.calculate(method))\r\n select1_button.place(x=250, y=75)\r\n\r\n def choose(self):\r\n self.welcome_label.place_forget()\r\n self.start_button.place_forget()\r\n\r\n self.frame1 = Frame(self.master, bg='#F5EFF6', width=500, height=500)\r\n self.frame1.place(x=50, y=20)\r\n\r\n choose_label = Label(self.frame1, text='Please select one!', font=('Comic Sans Ms', 24), fg='#bb88d3', bg='#F5EFF6')\r\n choose_label.place(x=100, y=80)\r\n\r\n options = [\"Singular value decomposition\", \"Polar decomposition\"]\r\n var = StringVar(value=options[0])\r\n\r\n option_menu = OptionMenu(self.frame1, var, *options)\r\n option_menu.place(x=100, y=130)\r\n\r\n self.back_button = 
Button(self.frame1, text=\"Main menu\", font=('Comic Sans Ms', 12), fg='#bb88d3', command=self.go_back)\r\n self.back_button.place(x=10, y=300)\r\n\r\n self.result_label = Label(self.frame1, textvariable=self.result)\r\n self.result_label.place(x=50, y=150)\r\n\r\n select_button = Button(self.frame1, text=\"Select\", font=('Comic Sans Ms', 12), fg='#bb88d3', command=lambda: self.select(var.get()))\r\n select_button.place(x=300, y=300)\r\n\r\nroot = Tk()\r\napp = MatrixCalculator(root)\r\nroot.mainloop()\r\n\r\n","repo_name":"aissataikyzy/pp3","sub_path":"end/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"32909619332","text":"import unittest\n#from BSTestRunner import BSTestRunner\nimport HTMLTestRunner\nimport time\nimport logging\nimport sys\nfrom Common.sendEmail import SendEmail\npath = r'C:Users\\yuanyuan.ni\\PycharmProjects\\AppTest'\nsys.path.append(path)\nsys.path.append('./Common')\nsys.path.append('./test_case')\nsys.path.append('./report')\n\n#指定测试用例和测试报告的路径\ntest_dir = '../test_case'\nreport_dir = '../report'\n\nif __name__=='__main__':\n\n #加载测试用例\n discover = unittest.defaultTestLoader.discover(test_dir, pattern='test_*.py')\n # 定义报告的文件格式\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\")\n test_report = report_dir\n filename = report_dir + '/' + now + ' test_report.html'\n print(filename)\n #运行用例并生成测试报告\n#with open(report_name, 'wb') as f:\n fp=open(filename,'wb')\n # runner = BSTestRunner(stream=fp, title=\"SideChef Test Report\", description=\"SideChef Andriod app Test Report\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'SideChef Test Report', description=u'SideChef Andriod app Test Report')\n logging.info(\"start run testcase...\")\n runner.run(discover)\n fp.close()\n # 向指定邮箱发送测试报告的html文件\n time.sleep(6)\n # 查找最新生成的测试报告地址\n new_report = SendEmail.new_report(test_report)\n # 自动发送邮件\n SendEmail().send_email(new_report)\n","repo_name":"Tinanyy/nyy","sub_path":"test_run/run_login.py","file_name":"run_login.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"4645899712","text":"from __future__ import absolute_import\nfrom __future__ import division\n\nimport errno\nimport logging\nimport os\nimport time\n\nfrom vdsm.common import constants\nfrom vdsm.common import properties\nfrom vdsm.common import systemctl\nfrom vdsm.common import systemd\nfrom vdsm.common.time import monotonic_time\n\nfrom . import constants as sc\nfrom . import exception as se\nfrom . sdc import sdCache\n\nQEMU_NBD = \"/usr/bin/qemu-nbd\"\nRUN_DIR = os.path.join(constants.P_VDSM_RUN, \"nbd\")\n\nlog = logging.getLogger(\"storage.nbd\")\n\n\nclass Timeout(Exception):\n pass\n\n\nclass ServerConfig(properties.Owner):\n\n sd_id = properties.UUID(required=True)\n img_id = properties.UUID(required=True)\n vol_id = properties.UUID(required=True)\n readonly = properties.Boolean(default=False)\n discard = properties.Boolean(default=False)\n\n def __init__(self, config):\n self.sd_id = config.get(\"sd_id\")\n self.img_id = config.get(\"img_id\")\n self.vol_id = config.get(\"vol_id\")\n self.readonly = config.get(\"readonly\")\n self.discard = config.get(\"discard\")\n\n\ndef start_server(server_id, config):\n cfg = ServerConfig(config)\n dom = sdCache.produce_manifest(cfg.sd_id)\n vol = dom.produceVolume(cfg.img_id, cfg.vol_id)\n\n if vol.isShared() and not cfg.readonly:\n raise se.SharedVolumeNonWritable(vol)\n\n cmd = [QEMU_NBD]\n\n sock = _socket_path(server_id)\n service = _service_name(server_id)\n\n cmd.append(\"--socket\")\n cmd.append(sock)\n\n cmd.append(\"--format\")\n cmd.append(sc.fmt2str(vol.getFormat()))\n\n cmd.append(\"--persistent\")\n\n # Use empty export name for nicer url: \"nbd:unix:/path\" instead of\n # \"nbd:unix:/path:exportname=name\".\n cmd.append(\"--export-name=\")\n\n cmd.append(\"--cache=none\")\n cmd.append(\"--aio=native\")\n\n if cfg.readonly:\n cmd.append(\"--read-only\")\n elif cfg.discard:\n cmd.append(\"--discard=unmap\")\n\n cmd.append(vol.getVolumePath())\n\n _create_rundir()\n\n log.info(\"Starting transient service %s, serving volume %s/%s via unix \"\n \"socket %s\",\n service, cfg.sd_id, cfg.vol_id, sock)\n\n systemd.run(cmd, unit=service, uid=os.getuid(), gid=os.getgid())\n\n if not _wait_for_socket(sock, 1.0):\n raise Timeout(\"Timeout starting NBD server {}: {}\"\n .format(server_id, config))\n\n return \"nbd:unix:\" + sock\n\n\ndef stop_server(server_id):\n service = _service_name(server_id)\n\n # systemctl.stop() does not have a way to detect that a server was not\n # running, so we need to check the service state before we stop it. This\n # is racy, but should be fine since service names are random and we start\n # them only once.\n\n info = systemctl.show(service, properties=(\"LoadState\",))\n if info and info[0][\"LoadState\"] == \"not-found\":\n log.info(\"Transient service %s is not running\", service)\n return\n\n log.info(\"Stopping transient service %s\", service)\n systemctl.stop(service)\n\n\ndef _service_name(server_id):\n return \"vdsm-nbd-{}.service\".format(server_id)\n\n\ndef _socket_path(server_id):\n return os.path.join(RUN_DIR, server_id + \".sock\")\n\n\ndef _wait_for_socket(sock, timeout):\n start = monotonic_time()\n elapsed = 0.0\n\n while elapsed < timeout:\n if os.path.exists(sock):\n log.debug(\"Waited for socket %.3f seconds\", elapsed)\n return True\n # Socket is usually availble after 20 milliseconds.\n time.sleep(0.02)\n elapsed = monotonic_time() - start\n\n return False\n\n\ndef _create_rundir():\n try:\n # /run/vdsm must exists, created by systemd. 
Do not try to recreate it,\n # hiding the fact that it was missing.\n os.mkdir(RUN_DIR)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n else:\n log.info(\"Created %s\", RUN_DIR)\n","repo_name":"chipmap/vdsm","sub_path":"lib/vdsm/storage/nbd.py","file_name":"nbd.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"28"}
+{"seq_id":"19082303881","text":"import sys\n\nn = int(input())\narr = list(map(int, sys.stdin.readline().split()))\nresult = [-1 for i in range(n)]\nl = []\n\nfor i in range(n):\n while l:\n if arr[l[-1]] < arr[i]:\n result[l.pop()] = arr[i]\n else:\n l.append(i)\n break\n if not l:\n l.append(i)\n\nprint(*result) # 공백 기준 리스트 원소 출력\n\n","repo_name":"jsl1113/LeeJiSeon","sub_path":"Coding_Test_study/python/17298.py","file_name":"17298.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"14352084056","text":"from typing import Union, Dict\n\nfrom mewpy.germ.lp import ConstraintContainer, VariableContainer, LinearProblem\nfrom mewpy.germ.models import Model, MetabolicModel, RegulatoryModel\nfrom mewpy.solvers.solution import Solution\nfrom mewpy.solvers.solver import VarType, Solver\n\n\nclass FBA(LinearProblem):\n\n def __init__(self,\n model: Union[Model, MetabolicModel, RegulatoryModel],\n solver: Union[str, Solver, None] = None,\n build: bool = False,\n attach: bool = False):\n \"\"\"\n Flux Balance Analysis (FBA) of a metabolic model. Regular implementation of a FBA for a metabolic model.\n\n For more details consult: https://dx.doi.org/10.1038%2Fnbt.1614\n\n :param model: a MetabolicModel, RegulatoryModel or GERM model. The model is used to retrieve\n variables and constraints to the linear problem\n :param solver: A Solver, CplexSolver, GurobiSolver or OptLangSolver instance.\n Alternatively, the name of the solver is also accepted.\n The solver interface will be used to load and solve a linear problem in a given solver.\n If none, a new solver is instantiated. An instantiated solver may be used,\n but it will be overwritten if build is true.\n :param build: Whether to build the linear problem upon instantiation. Default: False\n :param attach: Whether to attach the linear problem to the model upon instantiation. Default: False\n \"\"\"\n super().__init__(model=model, solver=solver, build=build, attach=attach)\n\n def _build_mass_constraints(self):\n gene_state = {gene.id: max(gene.coefficients) for gene in self.model.yield_genes()}\n\n constraints = {metabolite.id: ConstraintContainer(name=metabolite.id, lbs=[0.0], ubs=[0.0], coefs=[{}])\n for metabolite in self.model.yield_metabolites()}\n variables = {}\n\n for reaction in self.model.yield_reactions():\n if reaction.gpr.is_none:\n lb, ub = reaction.bounds\n\n else:\n res = reaction.gpr.evaluate(values=gene_state)\n if not res:\n lb, ub = 0.0, 0.0\n else:\n lb, ub = reaction.bounds\n\n variable = VariableContainer(name=reaction.id, sub_variables=[reaction.id],\n lbs=[float(lb)], ubs=[float(ub)], variables_type=[VarType.CONTINUOUS])\n variables[reaction.id] = variable\n\n for metabolite, stoichiometry in reaction.stoichiometry.items():\n constraints[metabolite.id].coefs[0][reaction.id] = stoichiometry\n\n self.add_variables(*variables.values())\n self.add_constraints(*constraints.values())\n return\n\n def _build(self):\n \"\"\"\n It builds the linear problem from the model. The linear problem is built from the model\n variables and constraints. The linear problem is then loaded into the solver.\n :return:\n \"\"\"\n if self.model.is_metabolic():\n # mass balance constraints and reactions' variables\n self._build_mass_constraints()\n\n self._linear_objective = {var.id: value for var, value in self.model.objective.items()}\n self._minimize = False\n\n return\n\n def _optimize(self, solver_kwargs: Dict = None, **kwargs) -> Solution:\n \"\"\"\n It optimizes the linear problem. The linear problem is solved by the solver interface.\n :param solver_kwargs: A dictionary of keyword arguments to be passed to the solver.\n :return: A Solution instance.\n \"\"\"\n return self.solver.solve(**solver_kwargs)\n","repo_name":"BioSystemsUM/MEWpy","sub_path":"src/mewpy/germ/analysis/fba.py","file_name":"fba.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"28"}
+{"seq_id":"43555555267","text":"import pandas as pd\ndataFrame = pd.read_csv(\"./wordDataset.csv\")\n# dataFrame = pd.read_csv(\"./test.csv\")\n# ***记录反向映射表(根据类别找index)\nclassDict = {}\n# ***这个是所有的词频list 纵坐标 类别号 横坐标 单词编号\nwordList = [[0]*(len(dataFrame.keys())) for _ in range(len(dataFrame[\"Unnamed: 0\"]))]\nwordDict = dataFrame.to_dict()\nkeys = list(wordDict.keys())\n# print(keys)\nprint(wordDict[\"Unnamed: 0\"])\n\nfor i in wordDict[\"Unnamed: 0\"]:\n classDict[wordDict[\"Unnamed: 0\"][i]] = i\n# ***正向记录映射表(根据index找类别)\nwordD = wordDict[\"Unnamed: 0\"]\n# print(classDict)\nwords = {}\nindex = 0\nfor word in keys[1:]:\n words[word] = index\n index += 1\n# print(words)\n# 遍历所有的单词\nfor i in range(1, len(keys)):\n items = wordDict[keys[i]]\n # 遍历所有的 类别\n # print(items)\n for j in range(len(items)):\n wordList[j][i] = items[j]\n# print(wordList)","repo_name":"suilin0432/VanillaBayesAndKNN","sub_path":"dataRead.py","file_name":"dataRead.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"74559151113","text":"#!/usr/bin/env python\n# coding=utf-8\nimport socket\nimport argparse\nimport time\nimport SocketServer\n\nparser = argparse.ArgumentParser(description='Manual to this tcp_server.py')\nparser.add_argument('-6', '--ipv6', default=False, help=\"ipv6 Switch\", action='store_true')\nparser.add_argument('-p', '--port', type=int, help=\"server port\")\nargs = parser.parse_args()\n\nclass MyServer(SocketServer.BaseRequestHandler):\n\n def handle(self):\n while True:\n data = self.request.recv(1024)[:30]\n self.request.sendall(data)\n if data == '' or \"short\" in data:\n break\n time.sleep(1)\n self.request.close()\n\nif __name__ == '__main__':\n if args.ipv6:\n SocketServer.TCPServer.address_family = socket.AF_INET6\n server = SocketServer.ThreadingTCPServer(('', args.port), MyServer)\n print(\"Start listening at %d\" % args.port)\n server.serve_forever()\n","repo_name":"jneeee/qos_tool","sub_path":"qos_server.py","file_name":"qos_server.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"1914532726","text":"# -*- coding: utf-8 -*-\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Created on 21 Mar 2018\n#\n# @author: rhamilton\n\n\"\"\"Tasks that Wadsworth nominally does.\n\nThese are complex actions that usually involve Yvette, in addition\nto some extra processing logic to make sure things are all ok.\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport datetime as dt\n\nfrom ligmos import utils\nfrom .. import yvette\n\n\ndef buttleData(eSSH, baseYcmd, args, iobj):\n \"\"\"\n \"\"\"\n # For debugging alarms\n startt = dt.datetime.utcnow()\n\n # Rename to control line length\n yR = yvette.remote\n\n # Need to make sure our destination directory actually exists first\n ldircheck = utils.files.checkDir(iobj.destdir)\n if ldircheck[0] is False:\n print(\"--> Local destination directory unreachable! Aborting!\")\n # return None\n\n print(\"--> Defining custom action set for buttling files...\")\n\n # Get the list of \"old\" files on the instrument host\n getNew = utils.common.processDescription(func=yR.commandYvetteSimple,\n name='GetNewDirs',\n timedelay=3.,\n maxtime=60.,\n needSSH=True,\n args=[eSSH, baseYcmd, args,\n iobj, 'findnew'],\n kwargs={'debug': args.debug})\n\n # Actually get the dir list on Yvette's machine\n ans, _ = utils.common.instAction(getNew)\n\n # rsync each directory, one by one so we can gather the stats\n for each in ans['DirsNew'][1]:\n # Now to start the checking process, multi-stage\n print(\"--> rsyncing remote %s:%s to local %s\" % (iobj.host,\n each, iobj.destdir))\n\n rsyncsrc = \"%s@%s:%s\" % (iobj.user, iobj.host, each)\n print(rsyncsrc)\n ret = utils.rsyncer.subpRsync(rsyncsrc, iobj.destdir, timeout=0)\n print(ret)\n","repo_name":"LowellObservatory/DataServants","sub_path":"dataservants/wadsworth/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"22420397506","text":"# Author: Nikolay Manchev \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nfrom .api import DominoAPISession\nfrom tzlocal import get_localzone\n\n\ndef get_hardware_tier_id(tier_name):\n \"\"\"Gets a hardware tier id from its human-readable name\n\n Parameters\n ----------\n tier_name : str\n The human-readable name of the hardware tier, such as \"Free\", \"Small\", or \"Medium\". \n\n Returns\n -------\n hw_tier_id : str\n Domino hardware tier ID (e.g. small-k8s, large-k8s, gpu-small-k8s, etc.)\n \"\"\"\n hw_tier_id = None\n\n if tier_name:\n domino_api = DominoAPISession.instance()\n\n for hardware_tier in domino_api.hardware_tiers_list():\n if tier_name.lower() == hardware_tier[\"hardwareTier\"][\"name\"].lower():\n hw_tier_id = hardware_tier[\"hardwareTier\"][\"id\"]\n\n return hw_tier_id\n\n\ndef get_default_hardware_tier():\n \"\"\"Gets the default HW tier for the project. The project ID is automatically \n fetched from the authentication session.\n\n Returns\n -------\n hw_tier_id : str\n Domino hardware tier ID (e.g. small-k8s, large-k8s, gpu-small-k8s, etc.)\n \"\"\"\n domino_api = DominoAPISession.instance()\n project_id = domino_api.project_id\n\n url = domino_api._routes.host + \\\n \"/v4/projects/\" + project_id + \"/hardwareTiers\"\n\n result = domino_api.request_manager.get(url).json()\n \n # As a fallback, select the first HW tier available to the project\n default_tier_id = result[0][\"hardwareTier\"][\"id\"]\n\n for tier in result:\n if tier[\"hardwareTier\"][\"isDefault\"]:\n # Found the default HW tier\n default_tier_id = tier[\"hardwareTier\"][\"id\"]\n\n return default_tier_id\n\ndef get_local_timezone():\n # Returns the local timezone\n local_tz = get_localzone()\n return str(local_tz)\n","repo_name":"dominodatalab/reference-project-domino-orchestrator","sub_path":"dom_orch/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"40266191070","text":"originalNumber = int(input(\"Please enter the number: \"))\nnumber1 = originalNumber\nnumber2 = originalNumber\na = 0\nwhile number1 != 0:\n number1=number1//10\n a += 1\nfor each in range(1,a+1):\n displayPrompt = number2//(10**(a-1))\n number2 = number2 - (displayPrompt*(10**(a-1)))\n print(\"Display prompt\", displayPrompt)\n a -= 1\n\n \n \n \n ","repo_name":"ismailsinansahin/PythonAssignments","sub_path":"Assignment_05/Question_09.py","file_name":"Question_09.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"9251814463","text":"import time\n\n\n#every constants which are used in framework\nHOST_SERVER = '192.168.100.9'\nHOST_CLIENT = '192.168.100.10'\nEXIT_SUCCESS = 0\nSTDERR_EMPTY = ''\nNAME_LOG_FILE = 'log_file.log'\nSERVICES = ['rpcbind', 'nfs-server', 'nfs-lock', 'nfs-idmap']\nFIREWALL_PORTS = ['111', '54302', '20048', '2049', '46666', '42955', '875']\nEXPORTS_PATH = '/etc/exports'\nTYPE_OF_MACHINES = ['server', 'client']\nNFS_SHARE_NAME = '/home/nfs_share_{}'.format(str(int(time.time())))\nTEST_SUCCESS = 0\nTEST_NOT_SUCCESS = 0","repo_name":"SokolJam/First_test_framework","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"43341178326","text":"\"\"\"\nПри помощи функции get_answer() отвечать на вопросы пользователя в\nask_user(), пока он не скажет \"Пока !\"\n\"\"\"\nanswers = {'Привет': 'Привет !',\n 'Как твои дела ?': 'Нормально',\n 'Что делаешь ? ':'Программирую'}\n\ndef get_answer(question):\n return answers.get(question)\ndef ask_user_2():\n while True:\n user_input = input('Поговори со мной : ').capitalize()\n if user_input == 'Пока !'.capitalize():\n print('Пока !')\n break\n else:\n print(get_answer(user_input))\nif __name__=='__main__':\n ask_user_2()\n","repo_name":"Predatorevil666/Week_2","sub_path":"week_2_option_4.py","file_name":"week_2_option_4.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"31718944557","text":"\ndef mitosis(array, left, right, r1):\n print(1, r1+1, left+1, r1+1, right+1)\n mid = (left+right)//2\n x = int(input())\n if x > 0 and left!=right and x!=right+1-left:\n mitosis(array, left,mid, r1)\n mitosis(array, mid+1, right, r1)\n elif x== right+1-left:\n for i in range(left,right+1):\n array[r1][i]=1\n # print(\"All1\") \n return \n elif left==right:\n array[r1][left]=x\n # print(\"left==right\")\n return\n elif x==0 :\n for i in range(left,right+1):\n array[r1][i]=0\n # print(\"x==0\") \n return \n if x == -1:\n exit()\n if left>right or left>n or right > n:\n # print(\"left>right\")\n return\n\nt = int(input())\nwhile t != 0:\n t -= 1\n n, p = map(int, input().strip().split())\n r1 = 0\n r2 = 0\n c1 = 0\n c2 = n-1\n array = [[0 for i in range(n)] for j in range(n)]\n for i in range(n):\n mitosis(array, c1,c2,r1)\n r1 +=1\n print(2) \n for i in range(n):\n for j in range(n):\n print(array[i][j], end=\" \")\n print()\n x = int(input())\n if x == -1:\n exit() ","repo_name":"Rajath-55/DS-A","sub_path":"Python/covidSampling.py","file_name":"covidSampling.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"39626627345","text":"from ..rialacha import Riail, RiailIs\n\nfrom ._utils import (\n get_consonant_blocks,\n IsContraction,\n is_foreign,\n expanded_form_passes,\n GUTAÍ,\n GUTAÍ_CAOL,\n GUTAÍ_LEATHAN\n)\n\nclass CaolLeCaol(Riail):\n gairid = \"Caol le caol\"\n prefix = \"clc\"\n fada = \"Caol le caol, leathan le leathan\"\n béarla = \"Slender with slender, broad with broad\"\n míniú = \"Caithfidh go haontaíonn na gutaí ar dhá thaobh consain\"\n soláithraíonn = (\"pointí_teipe\",)\n\n def tástáladh(self, focal: str, aschuir: dict) -> bool:\n aschuir[\"pointí_teipe\"] = []\n focal = focal.lower()\n consain_blocks = get_consonant_blocks(focal)\n for s, f in consain_blocks:\n if s > 0 and f < len(focal) - 1:\n g1 = focal[s - 1]\n g2 = focal[f + 1]\n assert g1 in GUTAÍ\n assert g2 in GUTAÍ\n\n if g1 in GUTAÍ_CAOL and g2 in GUTAÍ_LEATHAN \\\n or g1 in GUTAÍ_LEATHAN and g2 in GUTAÍ_CAOL:\n aschuir[\"pointí_teipe\"].append((s, f))\n\n return not bool(aschuir[\"pointí_teipe\"])\n\ndef a_is_ae(clc_pointí_teipe, focal):\n return all([\n s > 1 and f'{focal[s - 2]}{focal[s - 1]}' == 'ae'\n for s, _ in clc_pointí_teipe\n ])\n\nCAOL_LE_CAOL = (CaolLeCaol()\n # Exceptions\n .eisceacht_a_dhéanamh(\n # Contractions, if the expanded form passes (we put this here to ensure\n # they are not labelled as compound words, e.g. anseo vs anbhás)\n IsContraction() & expanded_form_passes,\n \"...ach amháin 'ansin' agus 'anseo'\"\n )\n .eisceacht_a_dhéanamh(\n # Compound words, if the breakpoints are where CLC fails\n FocailChumaisc() & is_breakpoint_in_failure_area,\n \"...agus roinnt focail chumaisc\"\n )\n .eisceacht_a_dhéanamh(\n # Is a preposition\n is_a_preposition,\n \"...agus roinnt reamhfhocail\"\n )\n .eisceacht_a_dhéanamh(\n # Where a slender e is really a broad ae\n a_is_ae,\n \"...agus nuair a bhíonn 'ae' leathan i ndáirire\"\n )\n .eisceacht_a_dhéanamh(\n # Just one of those words that just begins with an A\n # Mainly: arís, areir, aniar, adeir, ...\n begins_with_a,\n \"...agus roinnt dobhríathair a thosaíonn le 'a'\"\n )\n .eisceacht_a_dhéanamh(\n # This is a loanword\n is_foreign,\n \"...agus focail iasachta\"\n )\n .eisceacht_a_dhéanamh(\n # There are enough examples to suggest this is the one\n # true exception - féadfaidh exists, but so does féadfidh\n # in a range of sources\n RiailIs(\"féadfidh\"),\n \"...agus 'féadfidh'\"\n )\n)\n\n\nif __name__ == \"__main__\":\n focail = [\n 'neamhbheo',\n 'lena',\n 'rith',\n 'smaoineamh',\n 'laethanta',\n 'traenalaí',\n 'ospideal'\n ]\n\n for focal in focail:\n print(focal, CAOL_LE_CAOL.rith(focal, passed_already=False))\n","repo_name":"philtweir/soirbhiochas","sub_path":"soirbhiochas/leabharlann/caol_le_caol.py","file_name":"caol_le_caol.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"70003639117","text":"from pychron.core.displays.display import ErrorDisplay, DisplayController\nfrom pychron.core.utils import get_display_size\n\nds = get_display_size()\n\ngWarningDisplay = DisplayController(\n title=\"Warnings\",\n width=450,\n default_color=\"red\",\n bgcolor=\"light grey\",\n max_blocks=300,\n)\n\ngLoggerDisplay = DisplayController(\n title=\"Info\",\n width=700,\n x=ds.width - 650,\n y=20,\n font_size=10,\n default_color=\"black\",\n bgcolor=\"light grey\",\n max_blocks=300,\n)\n\ngMessageDisplay = DisplayController(\n title=\"Messages\",\n width=480,\n y=100,\n default_color=\"darkgreen\",\n bgcolor=\"light grey\",\n max_blocks=300,\n)\n\ngTraceDisplay = ErrorDisplay(\n title=\"Error Stack\",\n width=825,\n x=int((ds.width - 825) / 2),\n y=100,\n default_color=\"black\",\n)\n\n# ============= EOF =============================================\n","repo_name":"NMGRL/pychron","sub_path":"pychron/core/displays/gdisplays.py","file_name":"gdisplays.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"28"}
+{"seq_id":"18292043586","text":"import Chemins\nfrom Utils import UTILS_Adaptations\nfrom Utils.UTILS_Traduction import _\nimport wx\nimport FonctionsPerso\nimport GestionDB\nimport datetime\nfrom Utils import UTILS_Titulaires\nfrom Utils import UTILS_Questionnaires\nfrom Data import DATA_Civilites as Civilites\nfrom Utils import UTILS_Infos_individus\nfrom Utils import UTILS_Internet\nfrom Utils import UTILS_Interface\nfrom Ctrl.CTRL_ObjectListView import FastObjectListView, ColumnDefn, Filter, CTRL_Outils\n\n\nDICT_INFOS_INDIVIDUS = {}\n\n\ndef DateEngEnDateDD(dateEng):\n return datetime.date(int(dateEng[:4]), int(dateEng[5:7]), int(dateEng[8:10]))\n\ndef GetDictInfosIndividus():\n global DICT_INFOS_INDIVIDUS\n dictInfos = {}\n db = GestionDB.DB()\n req = \"\"\"SELECT IDindividu, individus.nom, prenom, rue_resid, cp_resid, ville_resid, secteurs.nom\n FROM individus\n LEFT JOIN secteurs ON secteurs.IDsecteur = individus.IDsecteur;\"\"\"\n db.ExecuterReq(req)\n listeDonnees = db.ResultatReq()\n db.Close()\n for IDindividu, nom, prenom, rue_resid, cp_resid, ville_resid, nom_secteur in listeDonnees :\n dictInfos[IDindividu] = { \"nom\" : nom, \"prenom\" : prenom, \"rue_resid\" : rue_resid, \"cp_resid\" : cp_resid, \"ville_resid\" : ville_resid, \"nom_secteur\": nom_secteur}\n DICT_INFOS_INDIVIDUS = dictInfos\n\ndef GetInfosOrganisme():\n # Récupération des infos sur l'organisme\n DB = GestionDB.DB()\n req = \"\"\"SELECT nom, rue, cp, ville, tel, fax, mail, site, num_agrement, num_siret, code_ape\n FROM organisateur\n WHERE IDorganisateur=1;\"\"\" \n DB.ExecuterReq(req)\n listeDonnees = DB.ResultatReq() \n DB.Close() \n dictOrganisme = {}\n for nom, rue, cp, ville, tel, fax, mail, site, num_agrement, num_siret, code_ape in listeDonnees :\n if ville != None : ville = ville.capitalize()\n dictOrganisme[\"{ORGANISATEUR_NOM}\"] = nom\n dictOrganisme[\"{ORGANISATEUR_RUE}\"] = rue\n dictOrganisme[\"{ORGANISATEUR_CP}\"] = cp\n dictOrganisme[\"{ORGANISATEUR_VILLE}\"] = ville\n dictOrganisme[\"{ORGANISATEUR_TEL}\"] = tel\n dictOrganisme[\"{ORGANISATEUR_FAX}\"] = fax\n dictOrganisme[\"{ORGANISATEUR_MAIL}\"] = mail\n dictOrganisme[\"{ORGANISATEUR_SITE}\"] = site\n dictOrganisme[\"{ORGANISATEUR_AGREMENT}\"] = num_agrement\n dictOrganisme[\"{ORGANISATEUR_SIRET}\"] = num_siret\n dictOrganisme[\"{ORGANISATEUR_APE}\"] = code_ape\n return dictOrganisme\n\n\ndef FormateStr(valeur=u\"\"):\n try :\n if valeur == None : return u\"\"\n elif type(valeur) == int : return str(valeur)\n elif type(valeur) == float : return str(valeur)\n else : return valeur\n except : \n return u\"\"\n\n\ndef FormateDate(dateStr):\n if dateStr == \"\" or dateStr == None : return \"\"\n date = str(datetime.date(year=int(dateStr[:4]), month=int(dateStr[5:7]), day=int(dateStr[8:10])))\n text = str(date[8:10]) + \"/\" + str(date[5:7]) + \"/\" + str(date[:4])\n return text\n\n\n\n\n\n#-----------INDIVIDUS-----------\n\nclass TrackIndividu(object):\n def __init__(self, listview, donnees, infosIndividus):\n self.listview = listview\n self.infosIndividus = infosIndividus\n self.IDindividu = donnees[\"IDindividu\"]\n self.IDcivilite = donnees[\"IDcivilite\"]\n self.nom = donnees[\"individus.nom\"]\n self.prenom = donnees[\"prenom\"]\n self.IDnationalite = donnees[\"IDnationalite\"]\n self.date_naiss = donnees[\"date_naiss\"]\n self.age = donnees[\"age\"]\n self.IDpays_naiss = donnees[\"IDpays_naiss\"]\n self.cp_naiss = donnees[\"cp_naiss\"]\n self.ville_naiss = donnees[\"ville_naiss\"]\n self.adresse_auto = donnees[\"adresse_auto\"]\n \n # 
Adresse auto ou manuelle\n if self.adresse_auto != None and self.adresse_auto in DICT_INFOS_INDIVIDUS :\n self.rue_resid = DICT_INFOS_INDIVIDUS[self.adresse_auto][\"rue_resid\"]\n self.cp_resid = DICT_INFOS_INDIVIDUS[self.adresse_auto][\"cp_resid\"]\n self.ville_resid = DICT_INFOS_INDIVIDUS[self.adresse_auto][\"ville_resid\"]\n self.secteur = DICT_INFOS_INDIVIDUS[self.adresse_auto][\"nom_secteur\"]\n else:\n self.rue_resid = donnees[\"rue_resid\"]\n self.cp_resid = donnees[\"cp_resid\"]\n self.ville_resid = donnees[\"ville_resid\"]\n self.secteur = donnees[\"secteurs.nom\"]\n \n self.profession = donnees[\"profession\"]\n self.employeur = donnees[\"employeur\"]\n self.travail_tel = donnees[\"travail_tel\"]\n self.travail_fax = donnees[\"travail_fax\"]\n self.travail_mail = donnees[\"travail_mail\"]\n self.tel_domicile = donnees[\"tel_domicile\"]\n self.tel_mobile = donnees[\"tel_mobile\"]\n self.tel_fax = donnees[\"tel_fax\"]\n self.mail = donnees[\"mail\"]\n self.tel_fax = donnees[\"tel_fax\"]\n self.genre = donnees[\"genre\"]\n self.categorieCivilite = donnees[\"categorieCivilite\"]\n self.civiliteLong = donnees[\"civiliteLong\"]\n self.civiliteAbrege = donnees[\"civiliteAbrege\"]\n self.nomImage = donnees[\"nomImage\"] \n \n # Récupération des réponses des questionnaires\n for dictQuestion in self.listview.LISTE_QUESTIONS :\n setattr(self, \"question_%d\" % dictQuestion[\"IDquestion\"], self.listview.GetReponse(dictQuestion[\"IDquestion\"], self.IDindividu))\n\n\n def GetDict(self):\n dictTemp = {\n \"{IDINDIVIDU}\" : str(self.IDindividu),\n \"{CODEBARRES_ID_INDIVIDU}\" : \"I%06d\" % self.IDindividu,\n \"{INDIVIDU_CIVILITE_LONG}\" : FormateStr(self.civiliteLong),\n \"{INDIVIDU_CIVILITE_COURT}\" : FormateStr(self.civiliteAbrege),\n \"{INDIVIDU_GENRE}\" : self.genre,\n \"{INDIVIDU_NOM}\" : FormateStr(self.nom),\n \"{INDIVIDU_PRENOM}\" : FormateStr(self.prenom),\n \"{INDIVIDU_DATE_NAISS}\" : FormateDate(self.date_naiss),\n \"{INDIVIDU_AGE}\" : FormateStr(self.age),\n \"{INDIVIDU_CP_NAISS}\" : FormateStr(self.cp_naiss),\n \"{INDIVIDU_VILLE_NAISS}\" : FormateStr(self.ville_naiss),\n \"{INDIVIDU_RUE}\" : FormateStr(self.rue_resid),\n \"{INDIVIDU_CP}\" : FormateStr(self.cp_resid),\n \"{INDIVIDU_VILLE}\" : FormateStr(self.ville_resid),\n \"{INDIVIDU_PROFESSION}\" : FormateStr(self.profession),\n \"{INDIVIDU_EMPLOYEUR}\" : FormateStr(self.employeur),\n \"{INDIVIDU_TEL_DOMICILE}\" : FormateStr(self.tel_domicile),\n \"{INDIVIDU_TEL_MOBILE}\" : FormateStr(self.tel_mobile),\n \"{INDIVIDU_FAX}\" : FormateStr(self.tel_fax),\n \"{INDIVIDU_EMAIL}\" : FormateStr(self.mail),\n \"{INDIVIDU_TEL_PRO}\" : FormateStr(self.travail_tel),\n \"{INDIVIDU_FAX_PRO}\" : FormateStr(self.travail_fax),\n \"{INDIVIDU_EMAIL_PRO}\" : FormateStr(self.travail_mail),\n \"nomImage\" : self.nomImage,\n }\n \n # Questionnaires\n for dictQuestion in self.listview.LISTE_QUESTIONS :\n dictTemp[\"{QUESTION_%d}\" % dictQuestion[\"IDquestion\"]] = FormateStr(getattr(self, \"question_%d\" % dictQuestion[\"IDquestion\"]))\n if dictQuestion[\"controle\"] == \"codebarres\" :\n dictTemp[\"{CODEBARRES_QUESTION_%d}\" % dictQuestion[\"IDquestion\"]] = FormateStr(getattr(self, \"question_%d\" % dictQuestion[\"IDquestion\"]))\n\n # Infos de base individus\n dictTemp.update(self.infosIndividus.GetDictValeurs(mode=\"individu\", ID=self.IDindividu, formatChamp=True))\n \n return dictTemp\n\ndef GetListeIndividus(listview=None, listeActivites=None, presents=None, IDindividu=None, infosIndividus=None):\n # Conditions Activites\n if listeActivites == 
None or listeActivites == [] :\n conditionActivites = \"\"\n else:\n if len(listeActivites) == 1 :\n conditionActivites = \" AND inscriptions.IDactivite=%d AND inscriptions.statut='ok' AND (inscriptions.date_desinscription IS NULL OR inscriptions.date_desinscription>='%s') \" % (listeActivites[0], datetime.date.today())\n else:\n conditionActivites = \" AND inscriptions.IDactivite IN %s AND inscriptions.statut='ok' AND (inscriptions.date_desinscription IS NULL OR inscriptions.date_desinscription>='%s') \" % (str(tuple(listeActivites)), datetime.date.today())\n\n # Conditions Présents\n conditionPresents = \"\"\n jointurePresents = \"\"\n if presents != None :\n conditionPresents = \" AND (consommations.date>='%s' AND consommations.date<='%s' AND consommations.etat IN ('reservation', 'present'))\" % (str(presents[0]), str(presents[1]))\n jointurePresents = \"LEFT JOIN consommations ON consommations.IDindividu = individus.IDindividu\"\n \n # Condition Individu donné\n conditionIndividus = \"\"\n if IDindividu != None :\n conditionIndividus = \" AND individus.IDindividu=%d\" % IDindividu\n \n # Récupération des individus\n listeChamps = (\n \"individus.IDindividu\", \"IDcivilite\", \"individus.nom\", \"prenom\", \"num_secu\",\"IDnationalite\",\n \"date_naiss\", \"IDpays_naiss\", \"cp_naiss\", \"ville_naiss\",\n \"adresse_auto\", \"rue_resid\", \"cp_resid\", \"ville_resid\", \n \"IDcategorie_travail\", \"profession\", \"employeur\", \"travail_tel\", \"travail_fax\", \"travail_mail\", \n \"tel_domicile\", \"tel_mobile\", \"tel_fax\", \"mail\", \"secteurs.nom\",\n )\n DB = GestionDB.DB()\n req = \"\"\"\n SELECT %s\n FROM individus \n LEFT JOIN inscriptions ON inscriptions.IDindividu = individus.IDindividu\n LEFT JOIN secteurs ON secteurs.IDsecteur = individus.IDsecteur\n %s\n WHERE individus.IDindividu>0 AND individus.deces != 1 AND individus.etat IS NULL %s %s %s\n GROUP BY individus.IDindividu\n ;\"\"\" % (\",\".join(listeChamps), jointurePresents, conditionActivites, conditionPresents, conditionIndividus)\n \n DB.ExecuterReq(req)\n listeDonnees = DB.ResultatReq()\n DB.Close() \n\n # Récupération des civilités\n dictCivilites = Civilites.GetDictCivilites()\n \n # Récupération des adresses auto\n GetDictInfosIndividus()\n \n listeListeView = []\n for valeurs in listeDonnees :\n dictTemp = {}\n dictTemp[\"IDindividu\"] = valeurs[0]\n # Infos de la table Individus\n for index in range(0, len(listeChamps)) :\n nomChamp = listeChamps[index]\n dictTemp[nomChamp] = valeurs[index]\n # Infos sur la civilité\n if dictTemp[\"IDcivilite\"] == None or dictTemp[\"IDcivilite\"] == \"\" :\n IDcivilite = 1\n else :\n IDcivilite = dictTemp[\"IDcivilite\"]\n dictTemp[\"genre\"] = dictCivilites[IDcivilite][\"sexe\"]\n dictTemp[\"categorieCivilite\"] = dictCivilites[IDcivilite][\"categorie\"]\n dictTemp[\"civiliteLong\"] = dictCivilites[IDcivilite][\"civiliteLong\"]\n dictTemp[\"civiliteAbrege\"] = dictCivilites[IDcivilite][\"civiliteAbrege\"] \n dictTemp[\"nomImage\"] = dictCivilites[IDcivilite][\"nomImage\"] \n \n if not dictTemp[\"date_naiss\"]:\n dictTemp[\"age\"] = None\n else:\n datenaissDD = datetime.date(year=int(dictTemp[\"date_naiss\"][:4]), month=int(dictTemp[\"date_naiss\"][5:7]), day=int(dictTemp[\"date_naiss\"][8:10]))\n datedujour = datetime.date.today()\n age = (datedujour.year - datenaissDD.year) - int((datedujour.month, datedujour.day) < (datenaissDD.month, datenaissDD.day))\n dictTemp[\"age\"] = age\n \n # Formatage sous forme de TRACK\n track = TrackIndividu(listview, dictTemp, 
infosIndividus)\n listeListeView.append(track)\n \n return listeListeView\n\n\n#-----------FAMILLES-----------\n\nclass TrackFamille(object):\n def __init__(self, listview, donnees, infosIndividus):\n self.listview = listview\n self.infosIndividus = infosIndividus\n self.IDfamille = donnees[\"IDfamille\"]\n self.nomTitulaires = donnees[\"titulaires\"]\n self.rue = donnees[\"rue\"]\n self.cp = donnees[\"cp\"]\n self.ville = donnees[\"ville\"]\n self.secteur = donnees[\"secteur\"]\n self.regime = donnees[\"nomRegime\"]\n self.caisse = donnees[\"nomCaisse\"]\n self.numAlloc = donnees[\"numAlloc\"]\n\n self.internet_identifiant = donnees[\"internet_identifiant\"]\n self.internet_mdp = donnees[\"internet_mdp\"]\n if self.internet_mdp and self.internet_mdp.startswith(\"custom\"):\n self.internet_mdp = \"********\"\n if self.internet_mdp and self.internet_mdp.startswith(\"#@#\"):\n self.internet_mdp = UTILS_Internet.DecrypteMDP(self.internet_mdp, IDfichier=donnees[\"IDfichier\"])\n\n # Ajout des adresses Emails des titulaires\n self.listeMails = donnees[\"listeMails\"]\n if len(self.listeMails) > 0 :\n self.mail = self.listeMails[0]\n else :\n self.mail = None\n \n # Récupération des réponses des questionnaires\n for dictQuestion in self.listview.LISTE_QUESTIONS :\n setattr(self, \"question_%d\" % dictQuestion[\"IDquestion\"], self.listview.GetReponse(dictQuestion[\"IDquestion\"], self.IDfamille))\n\n def GetDict(self):\n dictTemp = {\n \"{IDFAMILLE}\" : str(self.IDfamille),\n \"{CODEBARRES_ID_FAMILLE}\" : \"A%06d\" % self.IDfamille,\n \"{FAMILLE_NOM}\" : FormateStr(self.nomTitulaires),\n \"{FAMILLE_RUE}\" : FormateStr(self.rue),\n \"{FAMILLE_CP}\" : FormateStr(self.cp),\n \"{FAMILLE_VILLE}\" : FormateStr(self.ville),\n \"{FAMILLE_SECTEUR}\": FormateStr(self.secteur),\n \"{FAMILLE_REGIME}\" : FormateStr(self.regime),\n \"{FAMILLE_CAISSE}\" : FormateStr(self.caisse),\n \"{FAMILLE_NUMALLOC}\" : FormateStr(self.numAlloc),\n \"{FAMILLE_INTERNET_IDENTIFIANT}\": FormateStr(self.internet_identifiant),\n \"{FAMILLE_INTERNET_MDP}\": FormateStr(self.internet_mdp),\n }\n \n # Questionnaires\n for dictQuestion in self.listview.LISTE_QUESTIONS :\n dictTemp[\"{QUESTION_%d}\" % dictQuestion[\"IDquestion\"]] = FormateStr(getattr(self, \"question_%d\" % dictQuestion[\"IDquestion\"]))\n if dictQuestion[\"controle\"] == \"codebarres\" :\n dictTemp[\"{CODEBARRES_QUESTION_%d}\" % dictQuestion[\"IDquestion\"]] = FormateStr(getattr(self, \"question_%d\" % dictQuestion[\"IDquestion\"]))\n\n # Infos de base individus\n dictTemp.update(self.infosIndividus.GetDictValeurs(mode=\"famille\", ID=self.IDfamille, formatChamp=True))\n\n return dictTemp\n\ndef GetListeFamilles(listview=None, listeActivites=None, presents=None, IDfamille=None, infosIndividus=None):\n \"\"\" Récupération des infos familles \"\"\"\n # Conditions Activites\n if listeActivites == None or listeActivites == [] :\n conditionActivites = \"\"\n else:\n if len(listeActivites) == 1 :\n conditionActivites = \" AND inscriptions.IDactivite=%d\" % listeActivites[0]\n else:\n conditionActivites = \" AND inscriptions.IDactivite IN %s\" % str(tuple(listeActivites))\n\n # Conditions Présents\n conditionPresents = \"\"\n jointurePresents = \"\"\n if presents != None :\n conditionPresents = \" AND (consommations.date>='%s' AND consommations.date<='%s' AND consommations.etat IN ('reservation', 'present'))\" % (str(presents[0]), str(presents[1]))\n jointurePresents = \"LEFT JOIN consommations ON consommations.IDindividu = individus.IDindividu\"\n\n # Condition Famille 
donnée\n conditionFamilles = \"\"\n if IDfamille != None :\n conditionFamilles = \" AND familles.IDfamille=%d\" % IDfamille\n\n # Récupération des régimes et num d'alloc pour chaque famille\n DB = GestionDB.DB()\n req = \"\"\"\n SELECT \n familles.IDfamille, regimes.nom, caisses.nom, num_allocataire, internet_identifiant, internet_mdp\n FROM familles \n LEFT JOIN inscriptions ON inscriptions.IDfamille = familles.IDfamille\n LEFT JOIN individus ON individus.IDindividu = inscriptions.IDindividu\n %s\n AND inscriptions.IDfamille = familles.IDfamille\n LEFT JOIN caisses ON caisses.IDcaisse = familles.IDcaisse\n LEFT JOIN regimes ON regimes.IDregime = caisses.IDregime\n WHERE familles.etat IS NULL AND inscriptions.statut='ok' AND (inscriptions.date_desinscription IS NULL OR inscriptions.date_desinscription>='%s') %s %s %s\n GROUP BY familles.IDfamille\n ;\"\"\" % (jointurePresents, datetime.date.today(), conditionActivites, conditionPresents, conditionFamilles)\n\n DB.ExecuterReq(req)\n listeFamilles = DB.ResultatReq()\n DB.Close()\n\n IDfichier = FonctionsPerso.GetIDfichier()\n \n # Formatage des données\n listeListeView = []\n titulaires = UTILS_Titulaires.GetTitulaires() \n for IDfamille, nomRegime, nomCaisse, numAlloc, internet_identifiant, internet_mdp in listeFamilles :\n dictTemp = {}\n if IDfamille != None and IDfamille in titulaires :\n nomTitulaires = titulaires[IDfamille][\"titulairesSansCivilite\"]\n rue = titulaires[IDfamille][\"adresse\"][\"rue\"]\n cp = titulaires[IDfamille][\"adresse\"][\"cp\"]\n ville = titulaires[IDfamille][\"adresse\"][\"ville\"]\n listeMails = titulaires[IDfamille][\"listeMails\"]\n secteur = titulaires[IDfamille][\"adresse\"][\"nomSecteur\"]\n else :\n nomTitulaires = _(u\"Aucun titulaire\")\n rue = u\"\"\n cp = u\"\"\n ville = u\"\"\n listeMails = []\n dictTemp = {\n \"IDfamille\" : IDfamille, \"titulaires\" : nomTitulaires, \"nomRegime\" : nomRegime, \n \"nomCaisse\" : nomCaisse, \"numAlloc\" : numAlloc, \"secteur\": secteur,\n \"rue\" : rue, \"cp\" : cp, \"ville\" : ville, \"listeMails\" : listeMails, \"IDfichier\": IDfichier,\n \"internet_identifiant\": internet_identifiant, \"internet_mdp\": internet_mdp,\n }\n \n # Formatage sous forme de TRACK\n track = TrackFamille(listview, dictTemp, infosIndividus)\n listeListeView.append(track)\n \n return listeListeView\n\n\n#-----------LISTVIEW-----------\n\nclass ListView(FastObjectListView):\n def __init__(self, *args, **kwds):\n # Récupération des paramètres perso\n self.categorie = kwds.pop(\"categorie\", \"individus\")\n self.IDindividu = kwds.pop(\"IDindividu\", None)\n self.IDfamille = kwds.pop(\"IDfamille\", None)\n self.listeActivites = None\n self.presents = None\n # Infos organisme\n self.dictOrganisme = GetInfosOrganisme()\n self.UtilsQuestionnaires = UTILS_Questionnaires.Questionnaires()\n # Initialisation du listCtrl\n self.nom_fichier_liste = __file__\n FastObjectListView.__init__(self, *args, **kwds)\n # Binds perso\n self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)\n\n def InitModel(self):\n # Récupération des questions\n self.LISTE_QUESTIONS = self.UtilsQuestionnaires.GetQuestions(type=self.categorie[:-1])\n \n # Récupération des questionnaires\n self.DICT_QUESTIONNAIRES = self.UtilsQuestionnaires.GetReponses(type=self.categorie[:-1])\n\n # Récupération des infos de base individus et familles\n self.infosIndividus = UTILS_Infos_individus.Informations() \n \n # Récupération des tracks\n if self.categorie == \"individus\" :\n self.donnees = GetListeIndividus(self, self.listeActivites, 
self.presents, self.IDindividu, self.infosIndividus)\n else:\n self.donnees = GetListeFamilles(self, self.listeActivites, self.presents, self.IDfamille, self.infosIndividus)\n\n def InitObjectListView(self):\n # Création du imageList\n for categorie, civilites in Civilites.LISTE_CIVILITES :\n for IDcivilite, CiviliteLong, CiviliteAbrege, nomImage, genre in civilites :\n indexImg = self.AddNamedImages(nomImage, wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/%s\" % nomImage), wx.BITMAP_TYPE_PNG))\n \n def GetImageCivilite(track):\n return track.nomImage\n\n def FormateDate(dateStr):\n if dateStr == \"\" or dateStr == None : return \"\"\n date = str(datetime.date(year=int(dateStr[:4]), month=int(dateStr[5:7]), day=int(dateStr[8:10])))\n text = str(date[8:10]) + \"/\" + str(date[5:7]) + \"/\" + str(date[:4])\n return text\n \n def FormateAge(age):\n if age == None : return \"\"\n return _(u\"%d ans\") % age\n \n # Couleur en alternance des lignes\n self.oddRowsBackColor = UTILS_Interface.GetValeur(\"couleur_tres_claire\", wx.Colour(240, 251, 237))\n self.evenRowsBackColor = wx.Colour(255, 255, 255)\n self.useExpansionColumn = True\n \n if self.categorie == \"individus\" :\n # INDIVIDUS\n liste_Colonnes = [\n ColumnDefn(u\"\", \"left\", 22, \"IDindividu\", typeDonnee=\"entier\", imageGetter=GetImageCivilite),\n ColumnDefn(_(u\"Nom\"), 'left', 100, \"nom\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Prénom\"), \"left\", 100, \"prenom\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Date naiss.\"), \"left\", 72, \"date_naiss\", typeDonnee=\"date\", stringConverter=FormateDate),\n ColumnDefn(_(u\"Age\"), \"left\", 50, \"age\", typeDonnee=\"entier\", stringConverter=FormateAge),\n ColumnDefn(_(u\"Rue\"), \"left\", 150, \"rue_resid\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"C.P.\"), \"left\", 50, \"cp_resid\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Ville\"), \"left\", 120, \"ville_resid\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Secteur\"), \"left\", 120, \"secteur\", typeDonnee=\"texte\"),\n## ColumnDefn(_(u\"Tél. domicile\"), \"left\", 100, \"tel_domicile\"),\n## ColumnDefn(_(u\"Tél. 
mobile\"), \"left\", 100, \"tel_mobile\"),\n ColumnDefn(_(u\"Email\"), \"left\", 150, \"mail\", typeDonnee=\"texte\"),\n## ColumnDefn(_(u\"Profession\"), \"left\", 150, \"profession\"),\n## ColumnDefn(_(u\"Employeur\"), \"left\", 150, \"employeur\"),\n## ColumnDefn(_(u\"Tél pro.\"), \"left\", 100, \"travail_tel\"),\n## ColumnDefn(_(u\"Email pro.\"), \"left\", 150, \"travail_mail\"),\n ]\n \n else:\n # FAMILLES\n liste_Colonnes = [\n ColumnDefn(_(u\"ID\"), \"left\", 0, \"IDfamille\", typeDonnee=\"entier\"),\n ColumnDefn(_(u\"Famille\"), 'left', 200, \"nomTitulaires\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Rue\"), \"left\", 160, \"rue\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"C.P.\"), \"left\", 45, \"cp\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Ville\"), \"left\", 120, \"ville\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Secteur\"), \"left\", 120, \"secteur\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Email\"), \"left\", 100, \"mail\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Régime\"), \"left\", 130, \"regime\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Caisse\"), \"left\", 130, \"caisse\", typeDonnee=\"texte\"),\n ColumnDefn(_(u\"Numéro Alloc.\"), \"left\", 120, \"numAlloc\", typeDonnee=\"texte\"),\n ] \n \n # Ajout des questions des questionnaires\n liste_Colonnes.extend(UTILS_Questionnaires.GetColonnesForOL(self.LISTE_QUESTIONS))\n\n self.SetColumns(liste_Colonnes)\n self.CreateCheckStateColumn(0)\n \n if self.categorie == \"individus\" :\n self.SetEmptyListMsg(_(u\"Aucun individu\"))\n else:\n self.SetEmptyListMsg(_(u\"Aucune famille\"))\n self.SetEmptyListMsgFont(wx.FFont(11, wx.DEFAULT, False, \"Tekton\"))\n self.SetSortColumn(self.columns[2])\n self.SetObjects(self.donnees)\n \n def MAJ(self, categorie=None, listeActivites=None, presents=None):\n if categorie != None :\n if categorie ==\"individu\" : self.categorie = \"individus\"\n if categorie ==\"famille\" : self.categorie = \"familles\"\n if listeActivites != None : self.listeActivites = listeActivites\n if presents != None : self.presents = presents\n self.InitModel()\n self.InitObjectListView()\n\n def GetReponse(self, IDquestion=None, ID=None):\n if IDquestion in self.DICT_QUESTIONNAIRES :\n if ID in self.DICT_QUESTIONNAIRES[IDquestion] :\n return self.DICT_QUESTIONNAIRES[IDquestion][ID]\n return u\"\"\n\n def Selection(self):\n return self.GetSelectedObjects()\n \n def GetTracksCoches(self):\n return self.GetCheckedObjects()\n \n def GetInfosCoches(self):\n listeDonnees = []\n for track in self.GetTracksCoches() :\n dictTemp = track.GetDict()\n for code, valeur in self.dictOrganisme.items() :\n dictTemp[code] = valeur\n listeDonnees.append(dictTemp)\n return listeDonnees\n \n def SetIDcoches(self, listeID=[]):\n for track in self.donnees :\n if self.categorie == \"individus\" :\n ID = track.IDindividu\n else :\n ID = track.IDfamille\n if ID in listeID :\n self.Check(track)\n self.RefreshObject(track)\n \n def OnCheck(self, track=None):\n try :\n self.GetParent().OnCheck(track)\n except :\n pass\n\n def OnContextMenu(self, event):\n \"\"\"Ouverture du menu contextuel \"\"\" \n # Création du menu contextuel\n menuPop = UTILS_Adaptations.Menu()\n \n # Tout sélectionner\n item = wx.MenuItem(menuPop, 20, _(u\"Tout cocher\"))\n menuPop.AppendItem(item)\n self.Bind(wx.EVT_MENU, self.CocheListeTout, id=20)\n\n # Tout dé-sélectionner\n item = wx.MenuItem(menuPop, 30, _(u\"Tout décocher\"))\n menuPop.AppendItem(item)\n self.Bind(wx.EVT_MENU, self.CocheListeRien, id=30)\n \n menuPop.AppendSeparator()\n \n # Apercu avant impression\n 
item = wx.MenuItem(menuPop, 40, _(u\"Aperçu avant impression\"))\n bmp = wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/Apercu.png\"), wx.BITMAP_TYPE_PNG)\n item.SetBitmap(bmp)\n menuPop.AppendItem(item)\n self.Bind(wx.EVT_MENU, self.Apercu, id=40)\n \n # Imprimer\n item = wx.MenuItem(menuPop, 50, _(u\"Imprimer\"))\n bmp = wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/Imprimante.png\"), wx.BITMAP_TYPE_PNG)\n item.SetBitmap(bmp)\n menuPop.AppendItem(item)\n self.Bind(wx.EVT_MENU, self.Imprimer, id=50)\n \n menuPop.AppendSeparator()\n \n # Export Texte\n item = wx.MenuItem(menuPop, 600, _(u\"Exporter au format Texte\"))\n bmp = wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/Texte2.png\"), wx.BITMAP_TYPE_PNG)\n item.SetBitmap(bmp)\n menuPop.AppendItem(item)\n self.Bind(wx.EVT_MENU, self.ExportTexte, id=600)\n \n # Export Excel\n item = wx.MenuItem(menuPop, 700, _(u\"Exporter au format Excel\"))\n bmp = wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/Excel.png\"), wx.BITMAP_TYPE_PNG)\n item.SetBitmap(bmp)\n menuPop.AppendItem(item)\n self.Bind(wx.EVT_MENU, self.ExportExcel, id=700)\n\n self.PopupMenu(menuPop)\n menuPop.Destroy()\n\n def Impression(self, mode=\"preview\"):\n if self.donnees == None or len(self.donnees) == 0 :\n dlg = wx.MessageDialog(self, _(u\"Il n'y a aucune donnée à imprimer !\"), _(u\"Erreur\"), wx.OK | wx.ICON_EXCLAMATION)\n dlg.ShowModal()\n dlg.Destroy()\n return\n from Utils import UTILS_Printer\n prt = UTILS_Printer.ObjectListViewPrinter(self, titre=_(u\"Liste des %s\") % self.categorie, intro=\"\", total=\"\", format=\"A\", orientation=wx.LANDSCAPE)\n if mode == \"preview\" :\n prt.Preview()\n else:\n prt.Print()\n \n def Apercu(self, event):\n self.Impression(\"preview\")\n\n def Imprimer(self, event):\n self.Impression(\"print\")\n\n def ExportTexte(self, event):\n from Utils import UTILS_Export\n UTILS_Export.ExportTexte(self, titre=_(u\"Liste des %s\") % self.categorie)\n \n def ExportExcel(self, event):\n from Utils import UTILS_Export\n UTILS_Export.ExportExcel(self, titre=_(u\"Liste des %s\") % self.categorie)\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n\n\nclass BarreRecherche(wx.SearchCtrl):\n def __init__(self, parent):\n wx.SearchCtrl.__init__(self, parent, size=(-1, -1), style=wx.TE_PROCESS_ENTER)\n self.parent = parent\n self.rechercheEnCours = False\n \n self.SetDescriptiveText(_(u\"Rechercher...\"))\n self.ShowSearchButton(True)\n \n self.listView = self.parent.ctrl_listview\n nbreColonnes = self.listView.GetColumnCount()\n self.listView.SetFilter(Filter.TextSearch(self.listView, self.listView.columns[0:nbreColonnes]))\n \n self.SetCancelBitmap(wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/Interdit.png\"), wx.BITMAP_TYPE_PNG))\n self.SetSearchBitmap(wx.Bitmap(Chemins.GetStaticPath(\"Images/16x16/Loupe.png\"), wx.BITMAP_TYPE_PNG))\n \n self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnSearch)\n self.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self.OnCancel)\n self.Bind(wx.EVT_TEXT_ENTER, self.OnDoSearch)\n self.Bind(wx.EVT_TEXT, self.OnDoSearch)\n\n def OnSearch(self, evt):\n self.Recherche()\n \n def OnCancel(self, evt):\n self.SetValue(\"\")\n self.Recherche()\n\n def OnDoSearch(self, evt):\n self.Recherche()\n \n def Recherche(self):\n txtSearch = self.GetValue()\n self.ShowCancelButton(len(txtSearch))\n self.listView.GetFilter().SetText(txtSearch)\n self.listView.RepopulateList()\n self.Refresh() \n\n\n# 
-------------------------------------------------------------------------------------------------------------------------------------------\n\nclass MyFrame(wx.Frame):\n def __init__(self, *args, **kwds):\n wx.Frame.__init__(self, *args, **kwds)\n panel = wx.Panel(self, -1, name=\"test1\")\n sizer_1 = wx.BoxSizer(wx.VERTICAL)\n sizer_1.Add(panel, 1, wx.ALL|wx.EXPAND)\n self.SetSizer(sizer_1)\n self.myOlv = ListView(panel, id=-1, name=\"OL_test\", style=wx.LC_REPORT|wx.SUNKEN_BORDER|wx.LC_SINGLE_SEL|wx.LC_HRULES|wx.LC_VRULES)\n self.myOlv.MAJ(categorie=\"individu\", listeActivites=None, presents=None)\n sizer_2 = wx.BoxSizer(wx.VERTICAL)\n sizer_2.Add(self.myOlv, 1, wx.ALL|wx.EXPAND, 4)\n panel.SetSizer(sizer_2)\n self.Layout()\n\nif __name__ == '__main__':\n app = wx.App(0)\n #wx.InitAllImageHandlers()\n frame_1 = MyFrame(None, -1, \"OL TEST\")\n app.SetTopWindow(frame_1)\n frame_1.Show()\n app.MainLoop()\n","repo_name":"Noethys/Noethys","sub_path":"noethys/Ol/OL_Etiquettes.py","file_name":"OL_Etiquettes.py","file_ext":"py","file_size_in_byte":30798,"program_lang":"python","lang":"fr","doc_type":"code","stars":28,"dataset":"github-code","pt":"28"}
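Each GetDict() in the record above returns a flat {PLACEHOLDER} -> value mapping that is later merged with organiser-level codes and substituted into label templates. A minimal standalone sketch of that merge-and-substitute step; the placeholder names match the record, the data values are invented:

def fill_template(template, *dicts):
    # Later dicts win on key collisions, mirroring dictTemp.update(...).
    merged = {}
    for d in dicts:
        merged.update(d)
    for code, value in merged.items():
        template = template.replace(code, str(value))
    return template

label = fill_template('{INDIVIDU_NOM} {INDIVIDU_PRENOM} - {ORGANISATEUR_NOM}',
                      {'{INDIVIDU_NOM}': 'DUPONT', '{INDIVIDU_PRENOM}': 'Marie'},
                      {'{ORGANISATEUR_NOM}': 'Centre de loisirs'})
print(label)  # DUPONT Marie - Centre de loisirs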
+{"seq_id":"71970693516","text":"from collections import deque\n\nimport pickle\nimport gc\nimport gzip\nfrom typing import List\n\nimport numpy as np\nfrom agent_code.baby_terminator.custom_event_handling import custom_game_events\n\nfrom .callbacks import state_to_features\n\nimport torch\nfrom torch import nn\nfrom .utils import *\n\n\n# Hyper parameters -- DO modify\nTRANSITION_HISTORY_SIZE = 3 # keep only ... last transitions\nRECORD_ENEMY_TRANSITIONS = 1.0 # record enemy transitions with probability ...\n\n\ndef setup_training(self):\n \"\"\"\n Initialise self for training purpose.\n\n This is called after `setup` in callbacks.py.\n\n :param self: This object is passed to all callbacks and you can set arbitrary values.\n \"\"\"\n self.logger.info(\"Enter train mode\")\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.q_value = None\n self.loss = None\n self.number_of_executed_episodes = 0\n\n\ndef game_events_occurred(\n self,\n old_game_state: dict,\n self_action: str,\n new_game_state: dict,\n events: List[str],\n):\n \"\"\"\n Called once per step to allow intermediate rewards based on game events.\n\n When this method is called, self.events will contain a list of all game\n events relevant to your agent that occurred during the previous step. Consult\n settings.py to see what events are tracked. You can hand out rewards to your\n agent based on these events and your knowledge of the (new) game state.\n\n This is *one* of the places where you could update your agent.\n\n :param self: This object is passed to all callbacks and you can set arbitrary values.\n :param old_game_state: The state that was passed to the last call of `act`.\n :param self_action: The action that you took.\n :param new_game_state: The state the agent is in now.\n :param events: The events that occurred when going from `old_game_state` to `new_game_state`\n \"\"\"\n custom_events = custom_game_events(\n self, old_game_state, new_game_state, events, self_action\n )\n events.extend(custom_events)\n self.logger.debug(\n f'Encountered game event(s) {\", \".join(map(repr, events))} in step {new_game_state[\"step\"]}'\n )\n if old_game_state is None:\n return\n # get the input for the CNN\n state = state_to_features(self, old_game_state)\n if state is not None:\n action = torch.tensor([ACTIONS.index(self_action)], device=device)\n reward = reward_from_events(self, events)\n self.memory.rewards_of_round.append(reward)\n if new_game_state is None:\n next_state = None\n else:\n next_state = state_to_features(self, new_game_state)\n self.logger.info(f\"overall reward of step {reward}\")\n reward = torch.tensor(reward, device=device)\n # push the state to the memory in order to be able to learn from it\n self.memory.push(state, action, next_state, reward)\n\n # needs to be before optimize otherwise the events occured are not taken into account\n increment_event_counts(self, events)\n\n optimize_model(self)\n\n\ndef end_of_round(self, last_game_state: dict, last_action: str, events: List[str]):\n \"\"\"\n Called at the end of each game or when the agent died to hand out final rewards.\n This replaces game_events_occurred in this round.\n\n This is similar to game_events_occurred. 
self.events will contain all events that\n occurred during your agent's final step.\n\n This is *one* of the places where you could update your agent.\n This is also a good place to store an agent that you updated.\n\n :param self: The same object that is passed to all of your callbacks.\n \"\"\"\n self.logger.debug(\n f'Encountered event(s) {\", \".join(map(repr, events))} in final step'\n )\n custom_events = custom_game_events(self, None, last_game_state, events, last_action)\n events.extend(custom_events)\n\n state = state_to_features(self, last_game_state)\n action = torch.tensor([ACTIONS.index(last_action)], device=device)\n reward = reward_from_events(self, events)\n\n self.memory.rewards_of_round.append(reward)\n overall_reward = sum(self.memory.rewards_of_round)\n self.logger.info(f\"Overall reward at end of round: {overall_reward}\")\n self.memory.rewards_after_round.append(overall_reward)\n # reset memory for next round\n self.memory.rewards_of_round = []\n\n reward = torch.tensor(reward, device=device)\n self.memory.push(state, action, None, reward)\n self.memory.shortest_paths_out_of_explosion = []\n self.memory.shortest_paths_to_coin = []\n self.memory.shortest_paths_to_enemy = []\n self.memory.shortest_paths_to_crate = []\n self.memory.left_explosion_zone = False\n optimize_model(self)\n\n # increment episode count\n self.number_of_executed_episodes += 1\n\n # Add Q value to memory\n self.memory.q_value_after_episode.append(self.q_value)\n # Add loss to memory\n self.memory.loss_after_episode.append(self.loss)\n\n # Store the model\n if self.number_of_executed_episodes == last_game_state[\"number_rounds\"]:\n gc.disable()\n with gzip.open(\"my-saved-model.pkl.gz\", \"wb\") as f:\n pickle.dump(\n [self.policy_net, self.target_net, self.optimizer, self.memory],\n f,\n protocol=pickle.HIGHEST_PROTOCOL,\n )\n gc.enable()\n\n\ndef reward_from_events(self, events: List[str]) -> int:\n \"\"\"\n Compute the reward based on game events.\n\n This function computes the cumulative reward for the agent based on the game events.\n\n :param self: The agent instance.\n :param events: List of game events that occurred.\n\n :return: int: The total reward.\n \"\"\"\n\n reward_sum = 0\n rewarded_events = []\n for event in events:\n if event in self.memory.game_rewards:\n reward_sum += self.memory.game_rewards[event]\n rewarded_events.append(event)\n self.logger.info(\n f\"Awarded {reward_sum} for the {len(rewarded_events)} events {', '.join(rewarded_events)}\"\n )\n return reward_sum\n\n\ndef optimize_model(self):\n \"\"\"\n Optimize the agent's policy network using Q-Learning methods.\n\n This function is responsible for optimizing the agent's policy network by computing the loss using the Q-Learning method and performing backpropagation.\n\n :param self: The agent instance.\n\n :return: None\n \"\"\"\n self.logger.info(\"Optimizing model\")\n # Adapt the hyper parameters\n BATCH_SIZE, GAMMA = self.memory.train_params.values()\n\n if len(self.memory) < BATCH_SIZE:\n # if the memory does not contain enough information (< BATCH_SIZE) than do not learn\n return\n transitions = self.memory.sample(BATCH_SIZE)\n # \"online learning\" by always including the last step to ensure we learn from this experience\n batch = Transition(*zip(*transitions))\n\n non_final_mask = torch.tensor(\n tuple(map(lambda s: s is not None, batch.next_state)),\n device=device,\n dtype=torch.bool,\n )\n non_final_next_states = torch.stack(\n [s for s in batch.next_state if s is not None]\n ).float()\n\n state_batch = 
torch.stack(batch.state).float()\n action_batch = torch.stack(batch.action)\n reward_batch = torch.stack(batch.reward)\n\n # Compute Q value for all actions taken in the batch\n state_action_values = self.policy_net(state_batch).gather(1, action_batch)\n\n # compute the expected Q values\n next_state_values = torch.zeros(BATCH_SIZE, device=device)\n with torch.no_grad():\n next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(\n 1\n )[0]\n expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n # compute loss\n loss = nn.functional.smooth_l1_loss(\n state_action_values, expected_state_action_values.unsqueeze(1)\n )\n self.logger.info(f\"Loss of {loss}\")\n\n # Add Q value to object\n self.q_value = expected_state_action_values\n # Add loss to object\n self.loss = loss\n\n # back propagation\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_value_(self.policy_net.parameters(), 100)\n self.optimizer.step()\n\n # Check if it's time to update the target network\n if self.memory.steps_since_last_update >= self.memory.update_frequency:\n self.logger.info(\"Update target network\")\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()\n # Reset the steps since last update\n self.memory.steps_since_last_update = 0\n else:\n # Increment the counter\n self.memory.steps_since_last_update += 1\n\n # Dynamically adjust UPDATE_FREQUENCY via exp function only after the network has been updated once\n if self.memory.steps_since_last_update == 0:\n self.memory.update_frequency = int(\n 500 * np.exp(0.00001 * self.memory.steps_done)\n )\n # Ensure there's a maximum limit for UPDATE_FREQUENCY to prevent very infrequent updates\n self.memory.update_frequency = min(self.memory.update_frequency, 3500)\n","repo_name":"maldwg/ML-Essentials-Final-Project","sub_path":"agent_code/baby_terminator/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
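optimize_model() above computes the standard DQN target: non-final transitions contribute r + GAMMA * max_a Q_target(s', a), while final transitions contribute just r via the zeroed next-state values. A self-contained sketch of that target computation with toy tensors (not the agent's real networks):

import torch

GAMMA = 0.99
rewards = torch.tensor([1.0, 0.5, -1.0])
next_q = torch.rand(3, 6)                      # Q_target(s', a), 6 actions
non_final = torch.tensor([True, True, False])  # transition 3 ended the episode

next_values = torch.zeros(3)
with torch.no_grad():
    next_values[non_final] = next_q[non_final].max(1)[0]
targets = rewards + GAMMA * next_values  # the final state keeps just its reward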
+{"seq_id":"7456239549","text":"import matplotlib.pyplot as plt\n\n\ndef plot_amino_acids(counter):\n # First plot: counts\n plt.bar(counter.keys(), counter.values())\n plt.xlabel(\"Animo Acid\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Amino Acid Frequency\")\n plt.xticks(rotation=90) # doesn't fit otherwise\n plt.subplots_adjust(top=0.93, bottom=0.15, right=0.93) # (unfortunately) hard-coded values to fit the axis labels\n plt.show()\n\n # Second plot: relative frequencies (histogram)\n value_sum = sum(counter.values())\n for i in counter.keys():\n counter[i] = counter[i] / value_sum\n\n plt.bar(counter.keys(), counter.values())\n plt.xlabel(\"Animo Acid\")\n plt.ylabel(\"Relative Frequency (%)\")\n plt.title(\"Amino Acid % Relative Frequency\")\n plt.xticks(rotation=90)\n plt.subplots_adjust(top=0.93, bottom=0.15)\n plt.show()","repo_name":"gabrieljkeller/bioinformatics-challenge","sub_path":"src/aa_plotter.py","file_name":"aa_plotter.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"34014609387","text":"# ------------------------------------------------------------------------------ #\n# @Author: Sebastian B. Mohr\n# @Email: \n# @Created: 2020-06-05 11:25:02\n# @Last Modified: 2020-06-08 13:54:49\n# ------------------------------------------------------------------------------ #\n\nimport importlib\n\ndef get_cps(countries=None):\n \"\"\"\n Function that returns all change points for different countries.\n They are loaded from there corresponding country python files.\n\n Parameters\n ----------\n countries : array, string, optional\n if none all countries get returned\n\n Return\n ------\n change_points: dict\n dict housing all changepoints with country names as keys\n\n \"\"\"\n # Default argument\n if countries is None:\n countries = get_possible_countries()\n \n # Cast to list\n if not isinstance(countries, list):\n countries = [countries]\n\n # Check if the countries are in the all possible countires list\n all_possible = get_possible_countries()\n for country in countries:\n assert country in all_possible ,f\"Country '{country}' not in possibles: {all_possible}\"\n\n # Get change points\n change_points = dict()\n for country in countries:\n module = importlib.__import__(country)\n change_points[country] = module.cps\n\n return change_points\n\n\nfrom pathlib import Path\ndef get_possible_countries():\n \"\"\"\n Returns list of all possible countries\n \"\"\"\n results = []\n \"\"\"\n Lists all python files and removes the change_points file.\n \"\"\"\n for path in Path('./').rglob('*.py'):\n if path == Path('change_points.py'):\n continue\n results.append(path.stem)\n return results","repo_name":"Priesemann-Group/covid19_research","sub_path":"countries/change_points/change_points.py","file_name":"change_points.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"28"}
+{"seq_id":"73375352716","text":"### Top down ###\n\n# memoization\ndp = [0 for i in range(12)]\ndp[0] = 1\ndp[1] = 1\ndp[2] = 2\n\ndef f(n):\n if dp[n] == 0:\n dp[n] = f(n-1) + f(n-2)+ f(n-3)\n return dp[n]\n\n# get T\nT = int(input())\n\n# get array of n\nnum_list = []\nfor i in range(T):\n n = int(input())\n num_list.append(n)\n\nfor n in num_list:\n print(f(n))","repo_name":"jwcheong0420/TIL","sub_path":"practice_coding/boj/dp/9095-top-down-2.py","file_name":"9095-top-down-2.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"23042844517","text":"import os\r\nimport subprocess\r\nimport numpy as np\r\nfrom pathlib import Path\r\nimport torch\r\nfrom transformers import AdamW\r\nfrom data_convert import split_dataset_to_tsv\r\nfrom data_bert_corpus import BERTCorpus\r\nfrom model_crf import CRF\r\nfrom model_bert import BERT\r\nfrom model_bert_trainer import BERTTrainer\r\nimport matplotlib as mpl\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\n\r\nplt.rc('font', family='sans-serif')\r\nftsz = 28\r\nfigw = 16\r\npparam = {'figure.figsize': (figw, figw),\r\n 'lines.linewidth': 4.0,\r\n 'legend.fontsize': ftsz,\r\n 'axes.labelsize': ftsz,\r\n 'axes.titlesize': ftsz,\r\n 'axes.linewidth': 2.0,\r\n 'xtick.labelsize': ftsz,\r\n 'xtick.major.size': 20,\r\n 'xtick.major.width': 2.0,\r\n 'ytick.labelsize': ftsz,\r\n 'ytick.major.size': 20,\r\n 'ytick.major.width': 2.0,\r\n 'font.size': ftsz}\r\nplt.rcParams.update(pparam)\r\ncm = plt.get_cmap('plasma')\r\n\r\n# clear console\r\n# os.system('clear')\r\n# console dimensions\r\nn, m = subprocess.check_output(['stty', 'size']).decode().split()\r\nn, m = int(n), int(m)\r\n\r\n# run in cpu mode if gpu not available\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n# deterministic run\r\nseed = 256\r\ntorch.manual_seed(seed)\r\ntorch.backends.cudnn.deterministic = True\r\n\r\n# parameters for corpus\r\nembedding_dim = 300\r\nmin_word_freq = 3\r\nmax_vocab_size = 25_000\r\n# dataset selection\r\ntest = False\r\nif test:\r\n # udpos or conll2000\r\n prefix = 'conll2000'\r\n data_path = Path(__file__).parent / '../data'\r\n max_sequence_length = 64\r\n batch_size = 64\r\nelse:\r\n # so far, there have been random batch mismatches during training/evaluation that have not been solved yet\r\n prefix = 'gmb_dataset'\r\n data_path = Path(__file__).parent / '../data/'\r\n # if not all([os.path.isfile(data_path.resolve().as_posix()+'/{}/{}.tsv'.format(prefix, f)) for f in ['train', 'dev', 'test']]):\r\n split_dataset_to_tsv(data_path.resolve().as_posix(), prefix, seed)\r\n max_sequence_length = 64\r\n batch_size = 64\r\n\r\n# switch for training from scratch or loading previously saved weights from file\r\nnew_calculation = True\r\n\r\n# corpus class for interacting with data\r\nbert_corpus = BERTCorpus(data_path=data_path.resolve().as_posix(), max_sequence_length=max_sequence_length,\r\n batch_size=batch_size, device=device, test=test, prefix=prefix)\r\n\r\n# size of vocabs for texts and tags\r\ntag_vocab_size = len(bert_corpus.tag_field.vocab)\r\ntag_names = bert_corpus.tag_field.vocab.itos\r\n\r\n# print information about vocabularies\r\nprint(m*'-')\r\nprint('tag vocabulary built')\r\nprint('unique tokens in tag vocabulary: {}'.format(tag_vocab_size))\r\nprint('tags: '+(tag_vocab_size*'{} ').format(*tag_names))\r\nprint(m*'-')\r\n\r\n# print information about datasets\r\nprint('train set: {} sentences'.format(len(bert_corpus.train_set)))\r\nprint('valid set: {} sentences'.format(len(bert_corpus.valid_set)))\r\nprint('test set: {} sentences'.format(len(bert_corpus.valid_set)))\r\nprint(m*'-')\r\n\r\ntag_pad_idx = bert_corpus.tag_pad_idx\r\npad_token = bert_corpus.pad_token\r\n\r\ntry:\r\n CRF(tag_pad_idx, pad_token, tag_names)\r\n use_crf = True\r\n print('using crf for models')\r\nexcept:\r\n use_crf = False\r\n print('not using crf for models (incompatible tagging format)')\r\nprint(m*'-')\r\n\r\noptimizer_cls = AdamW\r\nfull_finetuning = False\r\nmax_grad_norm = 1.0\r\n\r\nbert 
= BERT(num_labels=tag_vocab_size, use_crf=use_crf, tag_pad_idx=tag_pad_idx, pad_token=pad_token, tag_names=tag_names)\r\n\r\nprint('BERTForTokenClassification model initialized with {} trainable parameters'.format(bert.count_parameters()))\r\nprint(bert)\r\nprint(m*'-')\r\n\r\nbert_trainer = BERTTrainer(model=bert, data=bert_corpus, optimizer_cls=optimizer_cls, full_finetuning=full_finetuning, max_grad_norm=max_grad_norm, device=device)\r\n\r\nn_epoch = 128\r\nbert_train_path = Path(__file__).parent / '../model/history/{}_{}_hist_train.pt'.format(prefix, 'bert')\r\nbert_valid_path = Path(__file__).parent / '../model/history/{}_{}_hist_valid.pt'.format(prefix, 'bert')\r\nbert_model_path = Path(__file__).parent / '../model/{}_{}_model.pt'.format(prefix, 'bert')\r\n\r\nif new_calculation:\r\n    print('training BERTForTokenClassification model')\r\n    print(m*'-')\r\n    if os.path.isfile(bert_model_path):\r\n        print('loading model checkpoint')\r\n        bert_trainer.load_model(model_path=bert_model_path)\r\n        bert_trainer.load_history(train_path=bert_train_path, valid_path=bert_valid_path)\r\n    bert_trainer.train(n_epoch=n_epoch)\r\n    bert_trainer.save_model(bert_model_path)\r\n    bert_trainer.save_history(bert_train_path, bert_valid_path)\r\nelse:\r\n    bert_trainer.load_model(bert_model_path)\r\n    bert_trainer.load_history(bert_train_path, bert_valid_path)\r\nbert_trainer.test()\r\n\r\nvalid_bert_history = np.array(bert_trainer.get_history()[1])[:, 2, :]\r\n# initialize figure and axes\r\nfig, ax = plt.subplots()\r\n# remove spines on top and right\r\nax.spines['right'].set_visible(False)\r\nax.spines['top'].set_visible(False)\r\n# set axis ticks to left and bottom\r\nax.xaxis.set_ticks_position('bottom')\r\nax.yaxis.set_ticks_position('left')\r\n# plot the validation F1 score history\r\nax.plot(valid_bert_history.mean(1), color=cm(0.5), label='BERT')\r\nax.legend(loc='upper left')\r\nax.set_xlabel('Epoch')\r\nax.set_ylabel('F1 Score')\r\n# save figure\r\nfig.savefig('bert_f1_history.png')\r\nplt.close()","repo_name":"walkernr/NamedEntityRecognition","sub_path":"scripts/test_bert.py","file_name":"test_bert.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
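The CRF try/except in the record above is a capability probe: construct the optional component once and degrade gracefully if the tagging scheme is incompatible. The pattern generalises to optional dependencies; a small sketch with a hypothetical optional package:

try:
    import seqeval  # hypothetical optional dependency
    have_seqeval = True
except ImportError:
    have_seqeval = False

# Callers branch on the flag instead of re-probing.
metric = 'entity-level F1' if have_seqeval else 'token accuracy'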
+{"seq_id":"20187581836","text":"# Imported libraries\nimport numpy as np\nfrom numpy.linalg import eig\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pypsa\nimport time\nfrom matplotlib.colors import LinearSegmentedColormap\nimport cartopy\nimport cartopy.io.shapereader as shpreader\nimport cartopy.crs as ccrs\n\n#----------------------------------------------------------------------------%\n# CO2 CONSTRAINTS\n# Load data - CO2 constraint\n# Folder name of data files\ndirectory = \"C:/Users/jense/.spyder-py3/data_files/elec_only/\"\n# Name of file (must be in correct folder location)\nfilename = [\"postnetwork-elec_only_0.125_0.6.h5\",\n \"postnetwork-elec_only_0.125_0.5.h5\",\n \"postnetwork-elec_only_0.125_0.4.h5\",\n \"postnetwork-elec_only_0.125_0.3.h5\",\n \"postnetwork-elec_only_0.125_0.2.h5\",\n \"postnetwork-elec_only_0.125_0.1.h5\",\n \"postnetwork-elec_only_0.125_0.05.h5\"]\n\nfilename = filename[-1]\n\n# Network\nnetwork = pypsa.Network(directory+filename)\n\n# Get the names of the data\ndata_names = network.loads_t.p.columns\n\n# Get time stamps\ntime_index = network.loads_t.p.index\n\n# Array of 30 country load\nload = network.loads_t.p_set\n\n# Dataframe (array) for different generator technologies\nwind = pd.DataFrame(np.zeros([8760, 30]), index=time_index, columns=(data_names))\nsolar = pd.DataFrame(np.zeros([8760, 30]), index=time_index, columns=(data_names))\nhydro = pd.DataFrame(np.zeros([8760, 30]), index=time_index, columns=(data_names))\n\n# Counter for positioning in generator data\ncounter = 0\nfor i in network.generators.index:\n \n # Current value to insert into correct array and position\n value = np.array(network.generators_t.p)[:,counter]\n \n # Check for wind, solar and hydro\n if (i[-4:] == \"wind\"):\n wind[i[0:2]] += value\n \n elif (i[-5:] == \"solar\"):\n solar[i[0:2]] += value\n \n elif (i[-3:] == \"ror\"):\n hydro[i[0:2]] += value\n \n # Increase value of counter by 1\n counter += 1\n\n# List of prices\nprices = network.buses_t.marginal_price\n\n# List of nodal prices for each country\ncountry_price = prices[data_names] # [€/MWh]\ncountry_price_gas = prices[(data_names + ' gas')] # [€/MWh]\ncountry_price_H2 = prices[(data_names + ' H2')] # [€/MWh]\ncountry_price_battery = prices[(data_names + ' battery')] # [€/MWh]\n\n# Sum up all the prices into one for every country\nnodal_price = country_price.values + country_price_gas.values + country_price_H2.values + country_price_battery.values\nnodal_price = pd.DataFrame(data=nodal_price, index=time_index, columns=data_names)\n\n# Total price of the system (for comparison)\ntotal_price = network.objective\n\n# Mean of nodal price\nnodal_price_avg = np.mean(nodal_price, axis=0)\n\n# Subtract the average for a mean centered distribution \nB = nodal_price.values - nodal_price_avg.values\n\n# Normalisation constant\nc = (1 / (np.sqrt( np.sum( np.mean( ( (nodal_price - nodal_price_avg)**2 ), axis=0 ) ) ) ) )\n \n# Covariance matrix \"It is a measure of how much each of the dimensions varies from the mean with respect to each other.\"\nC = np.cov(B.T)\n\n# Stops if C is larger than [30 x 30]\nassert np.size(C) <= 900, \"C is too big\"\n\n# Eigen vector and values\neigen_values, eigen_vectors = eig(C)\n\n# Creating array to describe variance explained by each of the eigen values\nvariance_explained = []\n\nfor i in eigen_values:\n variance_explained.append((i/sum(eigen_values))*100)\n\n# Cumulative variance explained\nvariance_explained_cumulative = np.cumsum(variance_explained)\n\n# Define the eigen 
vectors in a new variable with names\nVT = pd.DataFrame(data=eigen_vectors, index=data_names)\n\n\n\n\n#%%\nfreq = '1W'\nnodal_price_day = nodal_price.resample(rule=freq).sum()\nload_day = load.resample(rule=freq).sum()\nwind_day = wind.resample(rule=freq).sum() / 1000\nsolar_day = solar.resample(rule=freq).sum() / 1000\nhydro_day = hydro.resample(rule=freq).sum() / 1000\n#%%\n\nfig1, ax1 = plt.subplots()\ncolor = 'red'\nax1.set_xlabel('Date (sample interval: ' + freq + ')')\nax1.set_ylabel('Avg. Nodal price $[ \\dfrac{€}{MWh} ]$', color=color)\nlns1 = ax1.plot(nodal_price_day.index, nodal_price_day.mean(axis=1), color='red', label='Nodal price')\nax1.tick_params(axis='y', labelcolor=color)\nax1.legend(bbox_to_anchor = (1.12,1))\n\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\ncolor = 'black'\nax2.set_ylabel('Avg. Nodal energy generation [GWh]', color=color) # we already handled the x-label with ax1\nlns2 = ax2.plot(wind_day.index, wind_day.mean(axis=1), color='skyblue', linestyle='--', label='Wind generation')\nlns3 = ax2.plot(solar_day.index, solar_day.mean(axis=1), color='gold', linestyle='--', label='Solar generation')\nlns4 = ax2.plot(hydro_day.index, hydro_day.mean(axis=1), color='aqua', linestyle='--', label='ROR generation')\nax2.tick_params(axis='y', labelcolor=color)\n\n# Combine axes to get all labels\nlns = lns1+lns2+lns3+lns4\nlabs = [l.get_label() for l in lns]\nax1.legend(lns, labs, bbox_to_anchor = (1.14,1))\n\nplt.title(filename)\nplt.grid(axis='y')\nplt.show()\n\n#%%\n##############################################################################\n# Plot country figure\nfig = plt.figure(figsize=(9, 9))\nax = plt.axes(projection=cartopy.crs.TransverseMercator(20))\nax.add_feature(cartopy.feature.BORDERS, linestyle='-', alpha=1)\nax.coastlines(resolution='10m')\nax.add_feature(cartopy.feature.OCEAN, facecolor=(0.6,0.8,1), alpha=0.30)\nax.set_extent ((-9.5, 32, 35, 71), cartopy.crs.PlateCarree())\nax.gridlines()\n\n\n# List of European countries not included in the data\neurope_not_included = {'AD', 'AL','AX','BY', 'FO', 'GG', 'GI', 'IM', 'IS', \n 'JE', 'LI', 'MC', 'MD', 'ME', 'MK', 'MT', 'RU', 'SM', \n 'UA', 'VA', 'XK'}\n\n# Create shapereader file name\nshpfilename = shpreader.natural_earth(resolution='10m',\n category='cultural',\n name='admin_0_countries')\n\n# Read the shapereader file\nreader = shpreader.Reader(shpfilename)\n\n# Get the country records from the reader\ncountries = reader.records()\n\n# Attribute keys available on each record (for reference)\n#print(country.attributes.keys())\n\n# Determine name_loop variable\nname_loop = 'start'\n\n# PC number shown (1 to 30)\nPC_NO = 2\n\n# Start for-loop\nfor country in countries:\n \n # If the country is in the list of European countries not included, color it gray\n if country.attributes['ISO_A2'] in europe_not_included:\n ax.add_geometries(country.geometry, ccrs.PlateCarree(), \n facecolor=(0.8, 0.8, 0.8), alpha=0.50, linewidth=0.15, \n edgecolor=\"black\", label=country.attributes['ADM0_A3'])\n \n elif country.attributes['REGION_UN'] == 'Europe':\n if country.attributes['NAME'] == 'Norway':\n name_loop = 'NO'\n \n elif country.attributes['NAME'] == 'France':\n name_loop = 'FR'\n \n else:\n name_loop = country.attributes['ISO_A2']\n \n #print(name_loop)\n for country_PSA in VT.index.values:\n if country_PSA == name_loop:\n #print(\"Match!\")\n color_value = VT.loc[country_PSA][PC_NO-1]\n #print(color_value)\n if color_value <= 0:\n # Color red\n color_value = np.absolute(color_value)*1.5\n ax.add_geometries(country.geometry, ccrs.PlateCarree(), \n facecolor=(1, 0, 0), alpha=(np.min([color_value, 1])), linewidth=0.15, \n edgecolor=\"black\", label=country.attributes['ADM0_A3'])\n \n \n else:\n # Color green\n color_value = np.absolute(color_value)*1.5\n ax.add_geometries(country.geometry, ccrs.PlateCarree(), \n facecolor=(0, 1, 0), alpha=(np.min([color_value, 1])), linewidth=0.15, \n edgecolor=\"black\", label=country.attributes['ADM0_A3'])\n \n \n else:\n ax.add_geometries(country.geometry, ccrs.PlateCarree(), \n facecolor=(0.8, 0.8, 0.8), alpha=0.50, linewidth=0.15, \n edgecolor=\"black\", label=country.attributes['ADM0_A3'])\n \n\nplt.title(\"Colormap of Principal Component for Electricity Nodal Prices\")\nplt.legend([r'$\\lambda_{'+ str(PC_NO) + '}$ = ' + str(round(variance_explained[PC_NO-1],1)) + '%'], loc='upper left')\ntest = np.zeros([30,30])\ntest[0,0]=-1\ntest[0,29]=1\n\ncmap = LinearSegmentedColormap.from_list('mycmap', [(1,0,0),(1,0,0),(1,0.333,0.333),(1,0.666,0.666), 'white',(0.666,1,0.666),(0.333,1,0.333),(0,1,0),(0,1,0)])\n\ncax = fig.add_axes([0.87, 0.15, 0.02, 0.7])\nim = ax.imshow(test,cmap=cmap) \nplt.colorbar(im,cax=cax)\nplt.suptitle(filename,fontsize=20,x=.51,y=0.938)\n\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n#%%\ndef annuity(n,r):\n # n = number of periods\n # r = discount rate per period\n # Example\n # Calculate the annuity factor for an asset with lifetime n years and\n # discount rate of r with a total capital cost of 100€\n # e.g. annuity(20,0.05)*100€ = 8.024 € / Year\n # Over 20 years that results in a total cost of 8.024[€]*20[years] = 160.49[€]\n\n if r > 0:\n return r/(1. - 1./(1.+r)**n)\n else:\n return 1/n\n\noriginal_costs = pd.read_csv('C:/Users/jense/.spyder-py3/data_files/costs_2030.csv', index_col=(0,1))\ncost = pd.DataFrame(index=network.buses.index)\n\n# Onwind and offwind have both capital and marginal expenses, solar has only capital investments.\nonwind = (network.generators.filter(like='onwind',axis=0).p_nom_opt * network.generators.filter(like='onwind',axis=0).capital_cost).rename(lambda x : x[:2])\nonwind = onwind.groupby(onwind.index).sum() # group DE0+DE1+DE2, etc...\nonwind_marg =(network.generators_t.p.sum(axis=0).filter(like='onwind',axis=0) * network.generators.filter(like='onwind',axis=0).marginal_cost).rename(lambda x : x[:2])\nonwind_marg = onwind_marg.groupby(onwind_marg.index).sum() # group DE0+DE1+DE2, etc.\n\noffwind = (network.generators.filter(like='offwind',axis=0).p_nom_opt * network.generators.filter(like='offwind',axis=0).capital_cost).rename(lambda x : x[:2])\noffwind = offwind.reindex(onwind.index,fill_value=0) # fill empty values\noffwind_marg = (network.generators_t.p.sum(axis=0).filter(like='offwind',axis=0) * network.generators.filter(like='offwind',axis=0).marginal_cost).rename(lambda x : x[:2])\noffwind_marg = offwind_marg.reindex(onwind_marg.index,fill_value=0) # fill empty values\n\nsolar = (network.generators.filter(like='solar',axis=0).p_nom_opt * network.generators.filter(like='solar',axis=0).capital_cost).rename(lambda x : x[:2])\n\ncost['onwind'] = onwind + onwind_marg\ncost['offwind'] = offwind + offwind_marg\ncost['solar'] = solar\n\ncost['hydro'] = (original_costs.loc['hydro','FOM'].value/1e2 + annuity(original_costs.loc['hydro','lifetime'].value, 0.04)) *\\\n\t\t\t\t (original_costs.loc['hydro','investment'].value*1e3) *\\\n \t\t\t\t network.storage_units.loc[(data_names + ' hydro'),\"p_nom\"].rename(lambda x : x[:2])\n \ncost['ror'] = (original_costs.loc['ror','FOM'].value/1e2 + 
annuity(original_costs.loc['hydro','lifetime'].value, 0.04)) *\\\n \t\t\t\t (original_costs.loc['ror','investment'].value*1e3) *\\\n \t\t\t\t network.generators.loc[(data_names + ' ror'),\"p_nom\"].rename(lambda x : x[:2])\n \ncost['phs'] = (original_costs.loc['PHS','FOM'].value/1e2 + annuity(original_costs.loc['hydro','lifetime'].value, 0.04)) *\\\n \t\t\t\t (original_costs.loc['PHS','investment'].value*1e3) *\\\n \t\t\t\t network.storage_units.loc[(data_names + ' PHS'),\"p_nom\"].rename(lambda x : x[:2])\n\nbattery_links = (network.links.filter(like='battery charger',axis=0).p_nom_opt * network.links.filter(like='battery charger',axis=0).capital_cost).rename(lambda x : x[:2])\nbattery_stores = (network.stores.filter(like='battery',axis=0).e_nom_opt * network.stores.filter(like='battery',axis=0).capital_cost).rename(lambda x : x[:2])\ncost['battery'] = battery_links + battery_stores\n\nhydrogen_links_1 = (network.links.filter(like='H2 Electrolysis',axis=0).p_nom_opt * network.links.filter(like='H2 Electrolysis',axis=0).capital_cost).rename(lambda x : x[:2])\nhydrogen_links_2 = (network.links.filter(like='H2 Fuel Cell',axis=0).p_nom_opt * network.links.filter(like='H2 Fuel Cell',axis=0).capital_cost).rename(lambda x : x[:2])\nhydrogen_stores_a = (network.stores.filter(like='H2 Store tank',axis=0).e_nom_opt * network.stores.filter(like='H2 Store tank',axis=0).capital_cost).rename(lambda x : x[:2])\nhydrogen_stores_b = (network.stores.filter(like='H2 Store underground',axis=0).e_nom_opt * network.stores.filter(like='H2 Store underground',axis=0).capital_cost).rename(lambda x : x[:2])\ncost['hydrogen storage'] = hydrogen_links_1 + hydrogen_links_2 + hydrogen_stores_a + hydrogen_stores_b\n\ngas_M_1 = (network.stores_t.p.sum(axis=0).filter(like='gas',axis=0) * network.stores.filter(like='gas',axis=0).marginal_cost).rename(lambda x : x[:2])\ngas_M_2 = (network.links_t.p0.sum(axis=0).filter(like='OCGT',axis=0) * network.links.filter(like='OCGT',axis=0).marginal_cost).rename(lambda x : x[:2])\ngas_C = (network.links.filter(like='OCGT',axis=0).p_nom_opt * network.links.filter(like='OCGT',axis=0).capital_cost).rename(lambda x : x[:2])\ncost['gas'] = gas_M_1 + gas_M_2 + gas_C\n\n## Transmission.\n# Need to be split onto each country which can be done in many ways. Example: 50/50 on the two connected countries. Not fair for transition countries like CH.\ncost['transmission'] = (network.links[network.links.p_min_pu == -1].p_nom_opt * network.links[network.links.p_min_pu == -1].capital_cost).sum() *\\\n\t\t\t\t\t\t (network.loads_t.p.sum(axis=0) / network.loads_t.p.sum().sum()) # transmission cost split equally by load. \n\ncost = cost.fillna(0)\ncost = cost.stack()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"JonasVind/RD_Project_Code_and_Plots","sub_path":"Code/elec_only__electrical_nodal_prices.py","file_name":"elec_only__electrical_nodal_prices.py","file_ext":"py","file_size_in_byte":13895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
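The script above performs a principal component analysis by hand: mean-centre the hourly nodal prices, build the 30 x 30 covariance matrix, and diagonalise it. A self-contained sketch of just that step on synthetic data follows (one caveat worth noting: numpy.linalg.eig returns eigenpairs in no particular order, so sorting by eigenvalue before reading off the variance explained is a safe addition):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
prices = pd.DataFrame(rng.normal(50.0, 10.0, size=(8760, 30)))  # hours x countries

B = prices.values - prices.values.mean(axis=0)   # mean-centre each column
C = np.cov(B.T)                                  # 30 x 30 covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(C)

order = np.argsort(eigen_values)[::-1]           # sort by descending eigenvalue
eigen_values = eigen_values[order].real
eigen_vectors = eigen_vectors[:, order].real

variance_explained = 100 * eigen_values / eigen_values.sum()
print(np.cumsum(variance_explained)[:5])         # share captured by the first 5 PCs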
+{"seq_id":"39445829754","text":"#from imports import *\nimport time\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport h5py\nimport os\nimport re\n\nfrom nflows.flows.base import Flow\nfrom nflows.flows.autoregressive import MaskedAutoregressiveFlow\nfrom nflows.distributions.normal import StandardNormal, ConditionalDiagonalNormal\nfrom nflows.transforms.base import CompositeTransform\nfrom nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform, MaskedPiecewiseQuadraticAutoregressiveTransform, MaskedPiecewiseRationalQuadraticAutoregressiveTransform\nfrom nflows.transforms.permutations import ReversePermutation, RandomPermutation\n\nfrom .customFlows import IndependentRQS\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.distributions import MultivariateNormal\nimport torch.utils.data as utils\nfrom torch.nn.utils import clip_grad_norm_\nfrom copy import deepcopy\n\nfrom tqdm import tqdm\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ntorch.set_default_tensor_type('torch.cuda.FloatTensor') if torch.cuda.is_available() else print ('cpu')\n\nclass InfiniteLoader(utils.DataLoader):\n \"\"\"A data loader that can load a dataset repeatedly.\"\"\"\n\n def __init__(self, num_epochs=None, *args, **kwargs):\n \"\"\"Constructor.\n\n Args:\n dataset: A `Dataset` object to be loaded.\n batch_size: int, the size of each batch.\n shuffle: bool, whether to shuffle the dataset after each epoch.\n drop_last: bool, whether to drop last batch if its size is less than\n `batch_size`.\n num_epochs: int or None, number of epochs to iterate over the dataset.\n If None, defaults to infinity.\n \"\"\"\n super().__init__(\n *args, **kwargs\n )\n self.finite_iterable = super().__iter__()\n self.counter = 0\n self.num_epochs = float('inf') if num_epochs is None else num_epochs\n\n def __next__(self):\n try:\n return next(self.finite_iterable)\n except StopIteration:\n self.counter += 1\n if self.counter >= self.num_epochs:\n raise StopIteration\n self.finite_iterable = super().__iter__()\n return next(self.finite_iterable)\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return None\n\n\nclass chainedNFTrainer:\n def __init__(self,projName,bkg_train,bkg_test,data_train,data_test,varNames,control=[],NF_kwargs={},outDir=\"NF_models_QR/\",rangeScale=3,separateScale=False,minmax=True,useScale=True):\n assert len(varNames) == bkg_train.shape[1] and len(varNames) == data_train.shape[1]\n self.projName = projName\n self.varNames = varNames\n self.controlVars = [varNames[k] for k in control]\n self.control = control\n self.fitVars = [varNames[i] for i in range(len(varNames)) if i not in control]\n self.NF_kwargs = NF_kwargs\n if not os.path.exists(outDir):\n os.mkdir(outDir)\n self.outDir = f\"{outDir}/{projName}/\"\n if not os.path.exists(self.outDir):\n os.makedirs(self.outDir)\n self.rangeScale = rangeScale\n self.separateScale = separateScale\n self.minmax = minmax\n self.useScale = useScale\n\n self.varDict = {v:v for v in varNames}\n\n # saving means, stds, mins, maxes for scaling\n bkg_all = np.concatenate((bkg_train,bkg_test),axis=0)\n self.bkg_maxes = bkg_all.max(axis=0)\n self.bkg_mins = bkg_all.min(axis=0)\n self.bkg_means = bkg_all.mean(axis=0)\n self.bkg_stds = bkg_all.std(axis=0)\n data_all = np.concatenate((data_train,data_test),axis=0)\n self.data_maxes = data_all.max(axis=0)\n self.data_mins = data_all.min(axis=0)\n 
self.data_means = data_all.mean(axis=0)\n self.data_stds = data_all.std(axis=0)\n\n self.writeInfo()\n\n # normalize data for trainings\n bkg_train = self.scale(bkg_train,\"bkg\",forward=True)\n bkg_test = self.scale(bkg_test,\"bkg\",forward=True)\n data_train = self.scale(data_train,\"data\",forward=True)\n data_test = self.scale(data_test,\"data\",forward=True)\n\n # set up dictionaries of train/test data\n self.data_train = {self.varNames[i]:data_train[:,i].reshape(-1,1) for i in range(len(self.varNames))}\n self.data_test = {self.varNames[i]:data_test[:,i].reshape(-1,1) for i in range(len(self.varNames))}\n self.bkg_train = {self.varNames[i]:bkg_train[:,i].reshape(-1,1) for i in range(len(self.varNames))}\n self.bkg_test = {self.varNames[i]:bkg_test[:,i].reshape(-1,1) for i in range(len(self.varNames))}\n\n # variables to track training -- keep *raw* values in the 'corrected' collection b/c data and MC have different preprocessing scaling\n self.correctedBkg_train = {self.varNames[i]:self.scale(bkg_train[:,i],\"bkg\",forward=False,idx=i).reshape(-1,1) for i in range(len(self.varNames))}\n self.correctedBkg_test = {self.varNames[i]:self.scale(bkg_test[:,i],\"bkg\",forward=False,idx=i).reshape(-1,1) for i in range(len(self.varNames))}\n self.data_models = [None for _ in range(len(self.fitVars))]\n self.bkg_models = [None for _ in range(len(self.fitVars))]\n self.data_model_locs = [None for _ in range(len(self.fitVars))]\n self.bkg_model_locs = [None for _ in range(len(self.fitVars))]\n self.current = 0 # index of variable currently being corrected (index in self.varOrder)\n self.bkg_trainings = {n:{} for n in self.fitVars}\n self.data_trainings = {n:{} for n in self.fitVars}\n self.bkg_base_dists = {n:None for n in self.fitVars}\n\n def writeInfo(self):\n # write basic info to json in base directory\n out_js = {}\n out_js['projName'] = self.projName\n out_js['varNames'] = self.varNames\n out_js['control'] = self.control\n out_js['controlVars'] = self.controlVars\n out_js['fitVars'] = self.fitVars\n out_js['baseDir'] = self.outDir\n out_js['rangeScale'] = self.rangeScale\n out_js['separateScale'] = self.separateScale\n out_js['minmax'] = self.minmax\n out_js['useScale'] = self.useScale\n out_js['bkg_maxes'] = self.bkg_maxes.tolist()\n out_js['bkg_mins'] = self.bkg_mins.tolist()\n out_js['bkg_means'] = self.bkg_means.tolist()\n out_js['bkg_stds'] = self.bkg_stds.tolist()\n out_js['data_maxes'] = self.data_maxes.tolist()\n out_js['data_mins'] = self.data_mins.tolist()\n out_js['data_means'] = self.data_means.tolist()\n out_js['data_stds'] = self.data_stds.tolist()\n with open(f\"{self.outDir}/info.json\",\"w\") as info_out:\n json.dump(out_js,info_out,indent=4)\n\n @property\n def currentDir(self):\n currDir = f\"{self.outDir}/step{self.current}_{self.fitVars[self.current]}/\"\n if not os.path.isdir(currDir):\n os.makedirs(currDir)\n return currDir\n\n def scale(self,inputs,mode,forward=True,idx=None):\n if not self.useScale:\n return inputs\n else:\n if not self.separateScale:\n mins = np.minimum(self.bkg_mins,self.data_mins)\n maxes = np.maximum(self.bkg_maxes,self.data_maxes)\n else:\n mins = self.bkg_mins if mode=='bkg' else self.data_mins\n maxes = self.bkg_maxes if mode=='bkg' else self.data_maxes\n means = self.bkg_means\n stds = self.bkg_stds\n scale = np.where(np.abs(maxes)>np.abs(mins),np.abs(maxes),np.abs(mins))\n if idx is not None:\n mins,maxes,means,stds = mins[idx],maxes[idx],means[idx],stds[idx]\n scale = scale[idx]\n if forward:\n if self.minmax:\n inputs = 
2*self.rangeScale*((inputs-mins)/(maxes-mins)-0.5)\n else:\n inputs = (inputs-means)/stds\n else:\n if self.minmax:\n inputs = (maxes-mins)*(inputs/(2*self.rangeScale) + 0.5) + mins\n else:\n inputs = inputs*stds + means\n return inputs\n\n def new_flow(self,num_features,num_context,kwargs,base_dist=None):\n return make_flow(num_features,num_context,kwargs,base_dist=base_dist)\n\n def get_flow(self,n_features,n_context,loc,kwargs,base_dist=None):\n flow = self.new_flow(n_features,n_context,kwargs,base_dist=base_dist)\n flow.load_state_dict(torch.load(loc))\n flow.eval()\n return flow\n\n def train_flow(self,flow,loader,name,kwargs,n_avg=100,anneal=True):\n flow = flow.to(device)\n if kwargs['wd']>0:\n optimizer = optim.Adam(flow.parameters(),lr=kwargs['learning_rate'],weight_decay=kwargs['wd'])\n else:\n optimizer = optim.Adam(flow.parameters(),lr=kwargs['learning_rate'])\n if anneal:\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max=kwargs['n_epoch'],eta_min=0.01*kwargs['learning_rate'],verbose=False)\n\n min_loss = 1e+8\n train_losses = []\n patience_count = 0\n saveName = f\"{self.currentDir}/{name}.pt\"\n\n print(\"Training flow {0}\".format(name))\n\n tbar = tqdm(range(int(kwargs['n_epoch'])))\n avg_losses = []\n for i in tbar:\n if patience_count == kwargs['patience']:\n break\n epoch_losses = []\n x = next(loader)\n #for batch_idx, x in enumerate(loader):\n inputs,context = x\n optimizer.zero_grad()\n if torch.all(context==0):\n loss = -flow.log_prob(inputs=inputs)[0].mean()\n else:\n loss = -flow.log_prob(inputs=inputs,context=context)[0].mean()\n loss.backward()\n #gclip = 2\n #clip_grad_norm_(flow.parameters(), gclip)\n optimizer.step()\n train_losses.append(loss.item())\n if loss.item() < min_loss:\n min_loss = loss.item()\n patience_count = 0\n torch.save(flow.state_dict(),saveName)\n else:\n patience_count += 1\n avg_losses.append(loss.item())\n if i+1 > n_avg:\n avg_losses = avg_losses[1:]\n l_print = np.mean(avg_losses)\n s = 'Loss: {0}, p = {1}'.format(l_print,patience_count)\n tbar.set_description(s)\n if anneal:\n scheduler.step()\n\n flow.load_state_dict(torch.load(saveName))\n flow.eval()\n torch.cuda.empty_cache()\n flow = flow.to('cpu')\n\n return flow, saveName, train_losses\n\n def trainCurrentBkg(self,bs=10000,n_epoch=100,patience=20,learning_rate=1e-3,wd=0,anneal=True):\n train_kwargs = {'n_epoch':n_epoch,'patience':patience,'learning_rate':learning_rate,'wd':wd}\n currentVar = self.fitVars[self.current]\n contextVars = self.controlVars+self.fitVars[:self.current]\n contextIdx = [self.varNames.index(v) for v in contextVars]\n\n # train bkg flow\n if len(contextVars) > 0:\n bkg_train_context = torch.tensor(self.scale(\n np.concatenate([self.correctedBkg_train[n] for n in contextVars],axis=1),\n \"bkg\",forward=True,idx=contextIdx),dtype=torch.float32,device=device)\n else:\n bkg_train_context = torch.zeros(self.bkg_train[currentVar].shape,dtype=torch.float32,device=device)\n bkg_train_var = torch.tensor(self.bkg_train[currentVar],dtype=torch.float32,device=device)\n bkg_train_dataset = utils.TensorDataset(bkg_train_var,bkg_train_context)\n bkg_loader = InfiniteLoader(dataset=bkg_train_dataset,batch_size=bs,shuffle=True,\n generator=torch.Generator(device='cuda'))\n bkg_flow = self.new_flow(1,len(contextVars),self.NF_kwargs)\n bkg_flowName = \"bkgFlow_step{0}_{1}\".format(self.current,self.fitVars[self.current])\n bkg_flow, bkg_flowLoc, bkg_trainLosses = self.train_flow(bkg_flow,bkg_loader,bkg_flowName,train_kwargs,anneal=anneal)\n 
self.bkg_trainings[currentVar]['flowLoc'] = bkg_flowLoc\n self.bkg_trainings[currentVar]['flowName'] = bkg_flowName\n self.bkg_trainings[currentVar]['contextVars'] = contextVars\n self.bkg_trainings[currentVar]['NF_kwargs'] = self.NF_kwargs\n self.bkg_trainings[currentVar]['train_kwargs'] = train_kwargs\n self.bkg_trainings[currentVar]['losses'] = bkg_trainLosses\n\n #encoder = deepcopy(bkg_flow._distribution._context_encoder.to('cpu'))\n #for param in encoder.parameters():\n # param.requires_grad = False\n #self.bkg_base_dists[currentVar] = ConditionalDiagonalNormal(shape=[1],context_encoder=encoder)\n\n bkg_flowConfig = \"bkgFlowConfig_step{0}_{1}\".format(self.current,self.fitVars[self.current])\n with open(f\"{self.currentDir}/{bkg_flowConfig}.json\",\"w\") as cfg_out:\n json.dump(self.bkg_trainings[currentVar],cfg_out,indent=4)\n\n plt.figure(figsize=(8,6))\n w = int(self.bkg_train[currentVar].shape[0]/(5*bs))\n smooth = np.convolve(np.ones(w),bkg_trainLosses,mode='valid')/w\n xvals = np.linspace(0,len(bkg_trainLosses),len(smooth))\n plt.plot(xvals,smooth)\n #plt.plot(np.arange(len(bkg_trainLosses)),bkg_trainLosses)\n plt.title(bkg_flowName)\n plt.xlabel('Epoch',fontsize=16)\n plt.ylabel('Loss',fontsize=16)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.savefig(f\"{self.currentDir}/trainCurve_bkg.pdf\")\n\n del bkg_train_context, bkg_train_var, bkg_train_dataset, bkg_loader, bkg_flow\n torch.cuda.empty_cache()\n\n def trainCurrentData(self,bs=10000,n_epoch=100,patience=20,learning_rate=1e-3,wd=0,anneal=True):\n train_kwargs = {'n_epoch':n_epoch,'patience':patience,'learning_rate':learning_rate,'wd':wd}\n currentVar = self.fitVars[self.current]\n contextVars = self.controlVars+self.fitVars[:self.current]\n\n # train data flow\n if len(contextVars) > 0:\n data_train_context = torch.tensor(np.concatenate([self.data_train[n] for n in contextVars],axis=1),dtype=torch.float32,device=device)\n else:\n data_train_context = torch.zeros(self.data_train[currentVar].shape,dtype=torch.float32,device=device)\n data_train_var = torch.tensor(self.data_train[currentVar],dtype=torch.float32,device=device)\n data_train_dataset = utils.TensorDataset(data_train_var,data_train_context)\n data_loader = InfiniteLoader(dataset=data_train_dataset,batch_size=bs,shuffle=True,\n generator=torch.Generator(device='cuda'))\n data_flow = self.new_flow(1,len(contextVars),self.NF_kwargs,base_dist=self.bkg_base_dists[currentVar])\n data_flowName = \"dataFlow_step{0}_{1}\".format(self.current,self.fitVars[self.current])\n data_flow, data_flowLoc, data_trainLosses = self.train_flow(data_flow,data_loader,data_flowName,train_kwargs,anneal=anneal)\n self.data_trainings[currentVar]['flowLoc'] = data_flowLoc\n self.data_trainings[currentVar]['flowName'] = data_flowName\n self.data_trainings[currentVar]['contextVars'] = contextVars\n self.data_trainings[currentVar]['NF_kwargs'] = self.NF_kwargs\n self.data_trainings[currentVar]['train_kwargs'] = train_kwargs\n self.data_trainings[currentVar]['losses'] = data_trainLosses\n\n data_flowConfig = \"dataFlowConfig_step{0}_{1}\".format(self.current,self.fitVars[self.current])\n with open(f\"{self.currentDir}/{data_flowConfig}.json\",\"w\") as cfg_out:\n json.dump(self.data_trainings[currentVar],cfg_out,indent=4)\n\n plt.figure(figsize=(8,6))\n w = int(self.data_train[currentVar].shape[0]/(5*bs))\n smooth = np.convolve(np.ones(w),data_trainLosses,mode='valid')/w\n xvals = np.linspace(0,len(data_trainLosses),len(smooth))\n plt.plot(xvals,smooth)\n 
#plt.plot(np.arange(len(data_trainLosses)),data_trainLosses)\n plt.title(data_flowName)\n plt.xlabel('Epoch',fontsize=16)\n plt.ylabel('Loss',fontsize=16)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.savefig(f\"{self.currentDir}/trainCurve_data.pdf\")\n del data_train_context, data_train_var, data_train_dataset, data_loader, data_flow\n torch.cuda.empty_cache()\n\n def trainCurrent(self,bs=10000,n_epoch=100,patience=20,learning_rate=1e-3,wd=0,anneal=True):\n self.trainCurrentBkg(bs=bs,n_epoch=n_epoch,patience=patience,\n learning_rate=learning_rate,wd=wd,anneal=anneal)\n self.trainCurrentData(bs=bs,n_epoch=n_epoch,patience=patience,\n learning_rate=learning_rate,wd=wd,anneal=anneal)\n self.plotDensity(bkg=True,data=False)\n self.plotDensity(bkg=False,data=True)\n\n def loadCurrent(self):\n currentVar = self.fitVars[self.current]\n with open(f\"{self.currentDir}/dataFlowConfig_step{self.current}_{currentVar}.json\",\"r\") as f:\n self.data_trainings[currentVar] = json.load(f)\n with open(f\"{self.currentDir}/bkgFlowConfig_step{self.current}_{currentVar}.json\",\"r\") as f:\n self.bkg_trainings[currentVar] = json.load(f)\n\n def plotDensity(self,bkg=False,data=False,ylim=None):\n currentVar = self.fitVars[self.current]\n contextVars = self.controlVars+self.fitVars[:self.current]\n contextIdx = [self.varNames.index(v) for v in contextVars]\n\n if bkg:\n bkg_flow = self.get_flow(1,len(self.bkg_trainings[currentVar]['contextVars']),\n self.bkg_trainings[currentVar]['flowLoc'],\n self.bkg_trainings[currentVar]['NF_kwargs']).to(device)\n bkg_flow.eval()\n bkg_test_var = self.bkg_test[currentVar]\n nTest = bkg_test_var.shape[0]\n n_per = 20000\n split = np.array_split(np.arange(nTest),nTest//n_per + 1)\n samples = []\n if len(contextVars) > 0:\n bkg_test_context = self.scale(np.concatenate([self.correctedBkg_test[n] for n in contextVars],axis=1),\"bkg\",forward=True,idx=contextIdx)\n for k in split:\n samp_k = torch.tensor(bkg_test_context[k],dtype=torch.float32,device=device)\n with torch.no_grad():\n samples.append(bkg_flow.sample(1,context=samp_k).detach().cpu().numpy().reshape(k.shape[0],-1))\n del samp_k\n torch.cuda.empty_cache()\n del bkg_test_context\n else:\n with torch.no_grad():\n samples.append(bkg_flow.sample(bkg_test_var.shape[0]).detach().cpu().numpy())\n bkg_samples = np.concatenate(samples,axis=0)\n plotName = f\"{self.currentDir}/bkg_fitDensity_step{self.current}_{self.fitVars[self.current]}.pdf\"\n self.plotPair(bkg_test_var[:,0],bkg_samples[:,0],'bkg',plotName,log=False,ylim=ylim)\n plotName = f\"{self.currentDir}/bkg_fitDensity_step{self.current}_{self.fitVars[self.current]}_log.pdf\"\n self.plotPair(bkg_test_var[:,0],bkg_samples[:,0],'bkg',plotName,log=True,ylim=ylim)\n del bkg_flow, bkg_samples, bkg_test_var\n torch.cuda.empty_cache()\n\n if data:\n base_dist = self.bkg_base_dists[currentVar]\n data_flow = self.get_flow(1,len(self.data_trainings[currentVar]['contextVars']),self.data_trainings[currentVar]['flowLoc'],self.data_trainings[currentVar]['NF_kwargs'],base_dist=base_dist).to(device)\n data_flow.eval()\n data_test_var = self.data_test[currentVar]\n nTest = data_test_var.shape[0]\n n_per = 20000\n split = np.array_split(np.arange(nTest),nTest//n_per + 1)\n samples = []\n if len(contextVars) > 0:\n data_test_context = np.concatenate([self.data_test[n] for n in contextVars],axis=1)\n for k in split:\n samp_k = torch.tensor(data_test_context[k],dtype=torch.float32,device=device)\n 
samples.append(data_flow.sample(1,context=samp_k).detach().cpu().numpy().reshape(k.shape[0],-1))\n del samp_k\n torch.cuda.empty_cache()\n del data_test_context\n else:\n samples.append(data_flow.sample(data_test_var.shape[0]).detach().cpu().numpy())\n data_samples = np.concatenate(samples,axis=0)\n plotName = f\"{self.currentDir}/data_fitDensity_step{self.current}_{self.fitVars[self.current]}.pdf\"\n self.plotPair(data_test_var[:,0],data_samples[:,0],'data',plotName,log=False,ylim=ylim)\n plotName = f\"{self.currentDir}/data_fitDensity_step{self.current}_{self.fitVars[self.current]}_log.pdf\"\n self.plotPair(data_test_var[:,0],data_samples[:,0],'data',plotName,log=True,ylim=ylim)\n del data_flow, data_samples, data_test_var\n torch.cuda.empty_cache()\n\n def plotPair(self,ref,samples,sampName,saveName,log=False,ylim=None):\n plt.subplots(figsize=(8,6),nrows=2,ncols=1,gridspec_kw={'height_ratios':[3,1]},sharex=True)\n plt.subplot(211)\n h1,bins,_ = plt.hist(ref,bins=np.linspace(-self.rangeScale,self.rangeScale,50),\n density=True,histtype='step',label=f'{sampName} test')\n h2,bins,_ = plt.hist(samples,bins=np.linspace(-self.rangeScale,self.rangeScale,50),\n density=True,histtype='step',label=f'{sampName} samples')\n if log:\n plt.yscale('log')\n plt.legend()\n plt.subplot(212)\n w = bins[1]-bins[0]\n c = (bins[1:]+bins[:-1])/2\n h = np.divide(h2,h1,where=h1>0)\n plt.bar(x=c,height=h,width=w,align='center')\n if ylim is not None:\n plt.ylim(ylim)\n plt.yticks(np.linspace(ylim[0],ylim[1],5))\n else:\n plt.ylim([0.8,1.2])\n plt.yticks(np.arange(0.8,1.3,0.1))\n plt.grid(axis='y')\n plt.ylabel(\"Ratio\")\n plt.xlabel(self.fitVars[self.current])\n plt.savefig(saveName)\n\n def correctCurrent(self,n_per=10000,pad=0.1):\n currentVar = self.fitVars[self.current]\n contextVars = self.controlVars+self.fitVars[:self.current]\n contextIdx = [self.varNames.index(v) for v in contextVars]\n\n bkg_flow = self.get_flow(1,len(self.bkg_trainings[currentVar]['contextVars']),self.bkg_trainings[currentVar]['flowLoc'],self.bkg_trainings[currentVar]['NF_kwargs']).to(device)\n data_flow = self.get_flow(1,len(self.data_trainings[currentVar]['contextVars']),self.data_trainings[currentVar]['flowLoc'],self.data_trainings[currentVar]['NF_kwargs']).to(device)\n\n\n # correct training set\n if len(contextVars) > 0:\n bkg_train_context_bkg = self.scale(np.concatenate([self.correctedBkg_train[n] for n in contextVars],axis=1),\"bkg\",forward=True,idx=contextIdx)\n bkg_train_context_data = self.scale(np.concatenate([self.correctedBkg_train[n] for n in contextVars],axis=1),\"data\",forward=True,idx=contextIdx)\n else:\n bkg_train_context_bkg = np.zeros(self.bkg_train[currentVar].shape)\n bkg_train_context_data = np.zeros(self.bkg_train[currentVar].shape)\n bkg_train_var = self.bkg_train[currentVar]\n nTrain = bkg_train_context_bkg.shape[0]\n split = np.array_split(np.arange(nTrain),nTrain//n_per + 1)\n bkg_train_context_bkg = [bkg_train_context_bkg[k] for k in split]\n bkg_train_context_data = [bkg_train_context_data[k] for k in split]\n bkg_train_var = [bkg_train_var[k] for k in split]\n bkg_train_corr = []\n for i in tqdm(range(len(split))):\n inputs = torch.tensor(bkg_train_var[i],dtype=torch.float32,device=device)\n context_bkg = torch.tensor(bkg_train_context_bkg[i],dtype=torch.float32,device=device)\n context_data = torch.tensor(bkg_train_context_data[i],dtype=torch.float32,device=device)\n if torch.all(context_bkg==0):\n noise = bkg_flow.transform_to_noise(inputs=inputs)\n corrected = 
data_flow._transform.inverse(noise)[0]\n else:\n noise = bkg_flow.transform_to_noise(inputs=inputs,context=context_bkg)\n if type(bkg_flow._distribution) == ConditionalDiagonalNormal:\n mean_bkg,log_std_bkg = bkg_flow._distribution._compute_params(context_bkg)\n mean_data,log_std_data = data_flow._distribution._compute_params(context_bkg)\n std_bkg = torch.exp(log_std_bkg)\n std_data = torch.exp(log_std_data)\n noise = (noise-mean_bkg)/std_bkg\n noise = std_data*noise + mean_data\n corrected = data_flow._transform.inverse(noise,context=context_data)[0]\n bkg_train_corr.append(corrected.detach().cpu().numpy())\n del inputs,context_bkg,context_data,noise,corrected\n torch.cuda.empty_cache()\n bkg_train_corr = self.scale(np.concatenate(bkg_train_corr,axis=0),\"data\",forward=False,idx=self.varNames.index(currentVar))\n self.correctedBkg_train[currentVar] = np.copy(bkg_train_corr)\n del bkg_train_corr\n\n # correct test set\n if len(contextVars) > 0:\n bkg_test_context_bkg = self.scale(np.concatenate([self.correctedBkg_test[n] for n in contextVars],axis=1),\"bkg\",forward=True,idx=contextIdx)\n bkg_test_context_data = self.scale(np.concatenate([self.correctedBkg_test[n] for n in contextVars],axis=1),\"data\",forward=True,idx=contextIdx)\n else:\n bkg_test_context_bkg = np.zeros(self.bkg_test[currentVar].shape)\n bkg_test_context_data = np.zeros(self.bkg_test[currentVar].shape)\n bkg_test_var = self.bkg_test[currentVar]\n nTest = bkg_test_context_bkg.shape[0]\n split = np.array_split(np.arange(nTest),nTest//n_per + 1)\n bkg_test_context_bkg = [bkg_test_context_bkg[k] for k in split]\n bkg_test_context_data = [bkg_test_context_data[k] for k in split]\n bkg_test_var = [bkg_test_var[k] for k in split]\n bkg_test_corr = []\n for i in tqdm(range(len(split))):\n inputs = torch.tensor(bkg_test_var[i],dtype=torch.float32,device=device)\n context_bkg = torch.tensor(bkg_test_context_bkg[i],dtype=torch.float32,device=device)\n context_data = torch.tensor(bkg_test_context_data[i],dtype=torch.float32,device=device)\n if torch.all(context_bkg==0):\n noise = bkg_flow.transform_to_noise(inputs=inputs)\n corrected = data_flow._transform.inverse(noise)[0]\n else:\n noise = bkg_flow.transform_to_noise(inputs=inputs,context=context_bkg)\n if type(bkg_flow._distribution) == ConditionalDiagonalNormal:\n mean_bkg,log_std_bkg = bkg_flow._distribution._compute_params(context_bkg)\n mean_data,log_std_data = data_flow._distribution._compute_params(context_bkg)\n std_bkg = torch.exp(log_std_bkg)\n std_data = torch.exp(log_std_data)\n noise = (noise-mean_bkg)/std_bkg\n noise = std_data*noise + mean_data\n corrected = data_flow._transform.inverse(noise,context=context_data)[0]\n bkg_test_corr.append(corrected.detach().cpu().numpy())\n del inputs,context_bkg,context_data,noise,corrected\n torch.cuda.empty_cache()\n bkg_test_corr = self.scale(np.concatenate(bkg_test_corr,axis=0),\"data\",forward=False,idx=self.varNames.index(currentVar))\n self.correctedBkg_test[currentVar] = np.copy(bkg_test_corr)\n del bkg_test_corr\n\n del bkg_flow, data_flow\n torch.cuda.empty_cache()\n\n def correctFull(self,bkg,n_per=10000):\n bkg_corr = {self.varNames[i]:bkg[:,i].reshape(-1,1) for i in range(len(self.varNames))}\n bkg = self.scale(bkg,\"bkg\",forward=True)\n bkg = {self.varNames[i]:bkg[:,i].reshape(-1,1) for i in range(len(self.varNames))}\n\n for j,v in enumerate(self.fitVars):\n currentVar = v\n contextVars = self.controlVars+self.fitVars[:j]\n contextIdx = [self.varNames.index(v) for v in contextVars]\n\n bkg_flow = 
self.get_flow(1,len(self.bkg_trainings[currentVar]['contextVars']),self.bkg_trainings[currentVar]['flowLoc'],self.bkg_trainings[currentVar]['NF_kwargs']).to(device)\n base_dist = self.bkg_base_dists[currentVar]\n data_flow = self.get_flow(1,len(self.data_trainings[currentVar]['contextVars']),self.data_trainings[currentVar]['flowLoc'],self.data_trainings[currentVar]['NF_kwargs'],base_dist=base_dist).to(device)\n\n # correct training set\n if len(contextVars) > 0:\n context_bkg = self.scale(np.concatenate([bkg_corr[n] for n in contextVars],axis=1),\"bkg\",forward=True,idx=contextIdx)\n context_data = self.scale(np.concatenate([bkg_corr[n] for n in contextVars],axis=1),\"data\",forward=True,idx=contextIdx)\n else:\n context_bkg = np.zeros(bkg[currentVar].shape)\n context_data = np.zeros(bkg[currentVar].shape)\n inputs = bkg[currentVar]\n nEvts = inputs.shape[0]\n split = np.array_split(np.arange(nEvts),nEvts//n_per + 1)\n context_bkg = [context_bkg[k] for k in split]\n context_data = [context_data[k] for k in split]\n inputs = [inputs[k] for k in split]\n corr = []\n for i in tqdm(range(len(split))):\n inputs_i = torch.tensor(inputs[i],dtype=torch.float32,device=device)\n context_i_bkg = torch.tensor(context_bkg[i],dtype=torch.float32,device=device)\n context_i_data = torch.tensor(context_data[i],dtype=torch.float32,device=device)\n with torch.no_grad():\n if torch.all(context_i_bkg==0):\n noise = bkg_flow.transform_to_noise(inputs=inputs_i)\n corrected = data_flow._transform.inverse(noise)[0]\n else:\n noise = bkg_flow.transform_to_noise(inputs=inputs_i,context=context_i_bkg)\n corrected = data_flow._transform.inverse(noise,context=context_i_data)[0]\n corr.append(corrected.detach().cpu().numpy())\n del inputs_i,context_i_bkg,context_i_data,noise,corrected\n torch.cuda.empty_cache()\n corr = self.scale(np.concatenate(corr,axis=0),\"data\",forward=False,idx=self.varNames.index(currentVar))\n bkg_corr[currentVar] = np.copy(corr)\n del corr\n\n return np.concatenate([bkg_corr[v] for v in self.varNames],axis=1)\n\n def plotTriplet(self,ref,corr,uncorr,bins=50,xlabel=\"\",title=\"\",saveName=\"\",log=False,xlim=None,ylim=None):\n plt.subplots(figsize=(8,6),nrows=2,ncols=1,gridspec_kw={'height_ratios':[3,1]},sharex=True)\n plt.subplot(211)\n if xlim is not None:\n bins = np.linspace(xlim[0],xlim[1],bins)\n h1,bins,_ = plt.hist(ref,bins=bins,histtype='step',label=\"Data\",density=True,color='gray',fill=True,alpha=0.5)\n h2,bins,_ = plt.hist(corr,bins=bins,histtype='step',label=\"Corr Bkg\",density=True,color='C0',linewidth=2)\n h3,bins,_ = plt.hist(uncorr,bins=bins,histtype='step',label='Uncorr Bkg',density=True,color='red',linewidth=2)\n plt.title(title)\n if log:\n plt.yscale('log')\n plt.legend(loc='best')\n plt.subplot(212)\n w = bins[1]-bins[0]\n c = (bins[1:]+bins[:-1])/2\n h = np.divide(h2,h1,where=h1>0)\n plt.bar(x=c,height=h,width=w,align='center',color=\"C0\")\n h = np.divide(h3,h1,where=h1>0)\n plt.step(x=bins[:-1],y=h,where='post',color=\"red\",linewidth=2)\n plt.ylim([0.8,1.2] if ylim is None else ylim)\n plt.yticks(np.arange(0.8,1.3,0.1) if ylim is None else np.linspace(ylim[0],ylim[1],5))\n plt.grid(axis='y')\n plt.ylabel(\"Ratio\")\n plt.xlabel(xlabel)\n plt.savefig(saveName)\n\n def plotVar(self,var,bins,xlim=None,ylim=None):\n idx = self.varNames.index(var)\n data_train = self.scale(self.data_train[var],\"data\",forward=False,idx=idx)\n data_test = self.scale(self.data_test[var],\"data\",forward=False,idx=idx)\n bkg_train = 
self.scale(self.bkg_train[var],\"bkg\",forward=False,idx=idx)\n bkg_test = self.scale(self.bkg_test[var],\"bkg\",forward=False,idx=idx)\n #corrBkg_train = self.scale(self.correctedBkg_train[var],\"data\",forward=False,idx=idx)\n #corrBkg_test = self.scale(self.correctedBkg_test[var],\"data\",forward=False,idx=idx)\n corrBkg_train = self.correctedBkg_train[var]\n corrBkg_test = self.correctedBkg_test[var]\n\n saveName = f\"{self.outDir}/{var}_trainSet_beforeAfter.pdf\"\n self.plotTriplet(data_train,corrBkg_train,bkg_train,bins,self.varDict[var],\"Train Set\",saveName,xlim=xlim,ylim=ylim)\n saveName = f\"{self.outDir}/{var}_trainSet_beforeAfter_log.pdf\"\n self.plotTriplet(data_train,corrBkg_train,bkg_train,bins,self.varDict[var],\"Train Set\",saveName,log=True,xlim=xlim,ylim=ylim)\n\n saveName = f\"{self.outDir}/{var}_testSet_beforeAfter.pdf\"\n self.plotTriplet(data_test,corrBkg_test,bkg_test,bins,self.varDict[var],\"Test Set\",saveName,xlim=xlim,ylim=ylim)\n saveName = f\"{self.outDir}/{var}_testSet_beforeAfter_log.pdf\"\n self.plotTriplet(data_test,corrBkg_test,bkg_test,bins,self.varDict[var],\"Test Set\",saveName,log=True,xlim=xlim,ylim=ylim)\n\n def plotCurrent(self,bins,xlim=None,ylim=None):\n currentVar = self.fitVars[self.current]\n self.plotVar(currentVar,bins,xlim=xlim,ylim=ylim)\n\n def plotAll(self,bins):\n for i,v in enumerate(self.controlVars+self.fitVars):\n b = bins[i] if type(bins)==list else bins\n self.plotVar(v,b)\n\n def runCurrent(self,bs=10000,n_epoch=100,patience=20,learning_rate=1e-3,bins=50,wd=0,anneal=True):\n print(\"RUNNING TRAININGS\")\n self.trainCurrent(bs=bs,n_epoch=n_epoch,patience=patience,learning_rate=learning_rate,wd=wd,anneal=anneal)\n print(\"CORRECTING BKG\")\n self.correctCurrent(n_per=10000)\n print(\"PLOTTING CORRECTIONS\")\n self.plotCurrent(bins)\n\n def runAll(self,n_epoch=100,patience=20,learning_rate=1e-3,num_pt=100,bs=10000,wd=0):\n for i in range(len(self.fitVars)):\n nep = n_epoch[i] if type(n_epoch)==list else n_epoch\n pat = patience[i] if type(patience)==list else patience\n lr = learning_rate[i] if type(learning_rate)==list else learning_rate\n print(\"RUNNING TRAININGS\")\n self.trainCurrent(bs=bs,n_epoch=nep,patience=pat,learning_rate=lr,wd=wd)\n print(\"CORRECTING BKG\")\n self.correctCurrent(n_per=10000)\n self.stepForward()\n\n def stepForward(self):\n self.current += 1\n\n def stepBack(self):\n self.current -= 1\n\n def stepTo(self,step):\n self.current = step\n\n# def make_flow(num_features,num_context,kwargs,perm=False,base_dist=None):\n# flow_type = kwargs['flow_type']\n# if base_dist is None:\n# if num_context == 0:\n# base_dist = StandardNormal(shape=[num_features])\n# else:\n# encoder = nn.Linear(num_context,2*num_features)\n# base_dist = ConditionalDiagonalNormal(shape=[num_features],context_encoder=encoder)\n# base_dist = StandardNormal(shape=[num_features])\n# transforms = []\n# if num_context == 0:\n# num_context = None\n# for i in range(kwargs['num_layers']):\n# if flow_type == 'MAF':\n# transforms.append(MaskedAffineAutoregressiveTransform(features=num_features,\n# hidden_features=kwargs['hidden_features'],\n# num_blocks=kwargs['num_blocks_per_layer']))\n# elif flow_type == 'NSQUAD':\n# transforms.append(MaskedPiecewiseQuadraticAutoregressiveTransform(features=num_features,\n# context_features=num_context,\n# hidden_features=num_features,\n# num_bins=kwargs['num_bins'],\n# num_blocks=kwargs['num_blocks_per_layer'],\n# tail_bound=kwargs['tail_bound'],\n# tails='linear',\n# dropout_probability=kwargs['dropout'] if 
'dropout' in kwargs.keys() else 0,\n# use_batch_norm=kwargs['batchnorm'] if 'batchnorm' in kwargs.keys() else False))\n# elif flow_type == 'NSRATQUAD':\n# transforms.append(MaskedPiecewiseRationalQuadraticAutoregressiveTransform(features=num_features,\n# context_features=num_context,\n# hidden_features=kwargs['hidden_features'],\n# num_bins=kwargs['num_bins'],\n# num_blocks=kwargs['num_blocks_per_layer'],\n# tail_bound=kwargs['tail_bound'],\n# tails=kwargs['tails'],\n# dropout_probability=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n# use_batch_norm=kwargs['batchnorm'] if 'batchnorm' in kwargs.keys() else False))\n\n\n# if i < kwargs['num_layers'] - 1 and perm:\n# transforms.append(ReversePermutation(features=num_features))\n# #transforms.append(RandomPermutation(features=num_features))\n\n# transform = CompositeTransform(transforms)\n# flow = Flow(transform, base_dist)\n# return flow\n\ndef make_flow(num_features,num_context,kwargs,perm=False,base_dist=None):\n flow_type = kwargs['flow_type']\n if base_dist is None:\n if num_context == 0:\n base_dist = StandardNormal(shape=[num_features])\n else:\n #encoder = NeuralNet(num_context, 20, 2*num_features, 3, out_act=nn.Identity())\n encoder = nn.Linear(num_context,2*num_features)\n base_dist = ConditionalDiagonalNormal(shape=[num_features],context_encoder=encoder)\n base_dist = StandardNormal(shape=[num_features])\n transforms = []\n if num_context == 0:\n num_context = None\n for i in range(kwargs['num_layers']):\n if flow_type == 'MAF':\n transforms.append(MaskedAffineAutoregressiveTransform(features=num_features,\n hidden_features=kwargs['hidden_features'],\n num_blocks=kwargs['num_blocks_per_layer']))\n elif flow_type == 'NSQUAD':\n transforms.append(MaskedPiecewiseQuadraticAutoregressiveTransform(features=num_features,\n context_features=num_context,\n hidden_features=num_features,\n num_bins=kwargs['num_bins'],\n num_blocks=kwargs['num_blocks_per_layer'],\n tail_bound=kwargs['tail_bound'],\n tails='linear',\n dropout_probability=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n use_batch_norm=kwargs['batchnorm'] if 'batchnorm' in kwargs.keys() else False))\n elif flow_type == 'NSRATQUAD':\n transforms.append(MaskedPiecewiseRationalQuadraticAutoregressiveTransform(features=num_features,\n context_features=num_context,\n hidden_features=kwargs['hidden_features'],\n num_bins=kwargs['num_bins'],\n num_blocks=kwargs['num_blocks_per_layer'],\n tail_bound=kwargs['tail_bound'],\n tails=kwargs['tails'],\n dropout_probability=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n use_batch_norm=kwargs['batchnorm'] if 'batchnorm' in kwargs.keys() else False))\n elif flow_type == \"IRQS\":\n transforms.append(IndependentRQS(features=num_features,\n context=num_context,\n hidden=kwargs['hidden_features'],\n num_hidden=kwargs['num_blocks_per_layer'],\n num_bins=kwargs['num_bins'],\n tails=kwargs['tails'],\n tail_bound=kwargs['tail_bound'],\n dropout=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n residual=kwargs['residual'] if 'residual' in kwargs.keys() else False))\n elif flow_type == \"ARQS\":\n transforms.append(AutoregressiveRQS(features=num_features,\n context=num_context,\n hidden=kwargs['hidden_features'],\n num_hidden=kwargs['num_blocks_per_layer'],\n num_bins=kwargs['num_bins'],\n tails=kwargs['tails'],\n tail_bound=kwargs['tail_bound'],\n dropout=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n residual=kwargs['residual'] if 'residual' in kwargs.keys() else False))\n elif flow_type == \"C1D\":\n 
transforms.append(Conditional1DRQS(features=num_features,\n context=num_context,\n hidden=kwargs['hidden_features'],\n num_hidden=kwargs['num_blocks_per_layer'],\n num_bins=kwargs['num_bins'],\n tails=kwargs['tails'],\n tail_bound=kwargs['tail_bound'],\n dropout=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n residual=kwargs['residual'] if 'residual' in kwargs.keys() else False))\n elif flow_type == \"CMRQS\":\n transforms.append(ConditionalMultiRQS(features=num_features,\n num_context=num_context,\n hidden=kwargs['hidden_features'],\n num_hidden=kwargs['num_blocks_per_layer'],\n num_bins=kwargs['num_bins'],\n tails=kwargs['tails'],\n tail_bound=kwargs['tail_bound'],\n dropout=kwargs['dropout'] if 'dropout' in kwargs.keys() else 0,\n residual=kwargs['residual'] if 'residual' in kwargs.keys() else False))\n\n\n if i < kwargs['num_layers'] - 1 and perm:\n transforms.append(ReversePermutation(features=num_features))\n #transforms.append(RandomPermutation(features=num_features))\n\n transform = CompositeTransform(transforms)\n flow = Flow(transform, base_dist)\n return flow\n\n\ndef trainflows(iMC_train,\n iMC_test,\n iData_train,\n iData_test,\n iNLayers=1,\n iSeparateScale=False):\n\n NF_kwargs = {\"flow_type\":\"IRQS\",\"tail_bound\":3.2,\"hidden_features\":150,\n \"num_layers\":10,\"num_bins\":100,\"num_blocks_per_layer\":1,\"tails\":\"linear\",\n \"dropout\":0.0,\"residual\":False}\n\n dim = iMC_train.shape[1]\n fitvars = []\n\n for i0 in range(dim):\n fitvars.append(str(i0))\n\n rangeScale = 3\n variables = fitvars\n trainer = chainedNFTrainer(\"NAME_FOR_PROJECT\",\n iMC_train,\n iMC_test,\n iData_train,\n iData_test,\n fitvars,\n control=[],\n NF_kwargs=NF_kwargs,\n rangeScale=rangeScale,\n separateScale=iSeparateScale)\n\n bs = 100000\n n_epoch = 100\n n_iter = n_epoch*iMC_train.shape[0]//bs\n for i0 in range(dim):\n\n trainer.trainCurrentBkg(\n patience=-1,\n n_epoch=n_iter,\n learning_rate=1e-3,\n bs=bs)\n\n trainer.trainCurrentData(\n patience=-1,\n n_epoch=n_iter,\n learning_rate=1e-3,\n bs=bs)\n\n trainer.correctCurrent()\n trainer.stepForward()\n\n return trainer\n","repo_name":"maxgalli/SSNF2","sub_path":"utils/chainedMorpher.py","file_name":"chainedMorpher.py","file_ext":"py","file_size_in_byte":46835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
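The correction step in the trainer above (correctCurrent/correctFull) reduces to two calls: the background flow's transform_to_noise maps an MC value into the latent base space, and the data flow's inverse transform maps that latent point back out into data space. A toy sketch with two small, untrained affine flows from nflows, showing only the shapes and the API (real use trains both flows on their respective samples first, as the trainer does):

import torch
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform

def tiny_flow(features=1):
    # two stacked affine autoregressive layers over a standard-normal base
    transform = CompositeTransform([
        MaskedAffineAutoregressiveTransform(features=features, hidden_features=16)
        for _ in range(2)])
    return Flow(transform, StandardNormal(shape=[features]))

bkg_flow, data_flow = tiny_flow(), tiny_flow()
mc = torch.randn(128, 1)  # stand-in for one scaled MC variable

with torch.no_grad():
    noise = bkg_flow.transform_to_noise(inputs=mc)      # MC -> latent space
    corrected, _ = data_flow._transform.inverse(noise)  # latent -> data space
print(corrected.shape)  # torch.Size([128, 1])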
+{"seq_id":"6518696377","text":"import numpy as np\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\n#from model_torch import Net\nimport torch.nn as nn\nimport neptune\nimport torch.nn.functional as F\nfrom model_torch import model_fn\nfrom get_flops import find_flops\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ntorch.cuda.set_device(1)\n\n\n#device = \"cpu\"\nclass Final_model:\n '''\n Get final accuracy of the trained model\n '''\n def __init__(self, epochs=5, child_batchsize=128,actions=None):\n '''\n Manager which is tasked with creating subnetworks, training them on a dataset, and retrieving\n rewards in the term of accuracy, which is passed to the controller RNN.\n\n Args:\n dataset: a tuple of 4 arrays (X_train, y_train, X_val, y_val)\n epochs: number of epochs to train the subnetworks\n child_batchsize: batchsize of training the subnetworks\n '''\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.epochs = epochs\n self.batchsize = child_batchsize\n \n transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batchsize,\n shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n self.testloader = torch.utils.data.DataLoader(testset, batch_size=self.batchsize,\n shuffle=False, num_workers=2)\n \n self.net = model_fn(actions,10).to(self.device)\n self.macs,_=find_flops(model_fn(actions,10) , input = -1)\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n \n def get_accuracy(self):\n criterion = nn.CrossEntropyLoss().to(self.device)\n optimizer = optim.Adam(self.net.parameters(), lr=0.001,betas=(0.9, 0.999))\n for epoch in range(self.epochs): # loop over the dataset multiple times\n running_loss = 0.0\n for i, data in enumerate(self.trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = self.net(inputs.to(self.device))\n loss = criterion(outputs, labels.to(self.device))\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 200 == 199: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n correct = 0\n total = 0\n with torch.no_grad():\n for data in self.testloader:\n images, labels = data\n outputs = self.net(images.to(self.device))\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted.cpu() == labels).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n \n acc=correct/total\n return acc\n \nif __name__=='__main__':\n ##### Best states by NAS RL with accuracy as reward ######\n# actions=[3,32,3,32,3,64,3,64]\n# final_model=Final_model(epochs=70, child_batchsize=128,actions=actions)\n# print(\"Final accuracy of the models\",final_model.get_accuracy())\n# print(\"Flops of the model\",final_model.macs)\n \n \n ##### Best states by NAS RL with proxy score as reward ######\n #actions=[1,16,1,16,1,16,3,32]\n actions=[3,64,3,32,3,64,3,32]\n 
final_model=Final_model(epochs=70, child_batchsize=128,actions=actions)\n print(\"Final accuracy of the models\",final_model.get_accuracy())\n print(\"Flops of the model\",final_model.macs)\n \n \n \n \n #### Best states across N trials for NAS without training\n# actions_list=[[3, 32, 3, 16, 3, 32, 1, 32], [3, 32, 1, 64, 3, 16, 3, 32], [3, 32, 3, 16, 3, 16, 1, 32], [3, 64, 1, 32, 3, 64, 3, 32], [3, 32, 3, 16, 3, 64, 3, 16], [3, 16, 3, 64, 1, 64, 1, 32], [3, 16, 3, 16, 1, 32, 3, 16], [3, 16, 3, 16, 1, 32, 1, 32], [3, 64, 1, 32, 3, 32, 3, 32], [3, 64, 1, 32, 3, 16, 1, 32]]\n \n# list_accuracies=[]\n# list_flops=[]\n# for actions in actions_list:\n# final_model=Final_model(epochs=70, child_batchsize=128,actions=actions)\n# list_accuracies.append(final_model.get_accuracy())\n# list_flops.append(final_model.macs)\n# print(\"Final accuracy of the model\",np.mean(list_accuracies))\n# print(\"Flops of the model\",np.mean(list_flops))\n \n \n ","repo_name":"punyajoy/nasrl-without-training","sub_path":"train_final_model.py","file_name":"train_final_model.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
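model_fn in this script comes from the project's model_torch module, so its exact architecture is not shown here; one plausible reading of an action list like [3, 64, 3, 32, ...] is alternating (kernel_size, out_channels) pairs. A hypothetical builder in that spirit, for illustration only (the repo's actual model_fn may differ):

import torch
import torch.nn as nn

def build_from_actions(actions, num_classes=10, in_channels=3):
    # hypothetical: interpret actions as alternating (kernel_size, out_channels) pairs
    layers, c_in = [], in_channels
    for k, c_out in zip(actions[0::2], actions[1::2]):
        layers += [nn.Conv2d(c_in, c_out, kernel_size=k, padding=k // 2), nn.ReLU()]
        c_in = c_out
    layers += [nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(c_in, num_classes)]
    return nn.Sequential(*layers)

net = build_from_actions([3, 64, 3, 32, 3, 64, 3, 32])
print(net(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])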
+{"seq_id":"37769523972","text":"import pandas as pd\nimport numpy as np\npath = \"http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data\"\ncolumn_name = ['Sample code number','Clump Thickness','Uniformity of Cell Size','Uniformity of Cell Shape','Marginal Adhesion','Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class']\ndata = pd.read_csv(path,names=column_name)\n#替换缺失值\ndata = data.replace(to_replace='?',value=np.nan)\ndata.dropna(inplace=True)\ndata.isnull().any() #所有列不存在缺失值\n\nfrom sklearn.model_selection import train_test_split\nx = data.iloc[:,1:-1]\ny = data[\"Class\"]\nx_train, x_test, y_train, y_test = train_test_split(x,y, random_state=22)\n\n#标准化:\nfrom sklearn.preprocessing import StandardScaler\ntransfer = StandardScaler()\nx_train = transfer.fit_transform(x_train)\nx_test = transfer.transform(x_test)\n\n#逻辑回归:\nfrom sklearn.linear_model import LogisticRegression\nestimator = LogisticRegression()\nestimator.fit(x_train,y_train)\n\n#逻辑回归模型参数:\n#得出模型\nprint(\"正规方程-权重系数为:\\n\", estimator.coef_)\nprint(\"正规方程-偏置为:\\n\", estimator.intercept_)\n\n#模型评估:\ny_predict = estimator.predict(x_test)\nprint(\"y_predict:\\n\",y_predict)\nprint(\"直接比对真实值和预测值:\\n\",y_test==y_predict)\nscore= estimator.score(x_test,y_test)\nprint(\"准确率:\\n\",score)\n\n#精准率和召回率计算\nfrom sklearn.metrics import classification_report\nreport = classification_report(y_test,y_predict,labels=[2,4],target_names=[\"良性\",\"恶行\"])\nprint(report)\n\n#ROC曲线,AUC 面积\nfrom sklearn.metrics import roc_auc_score\n# y_true :每个样本的真实类别,必须是0(反例),1(正例)\n#将t_test 转换成0 1 \ny_true= np.where(y_test > 3, 1,0)\nauc = roc_auc_score(y_true, y_predict)\nprint(auc)\n\n","repo_name":"sqhan-whu/M-Learning","sub_path":"scikit-learn/LogisticRegression_Precision_ROC.py","file_name":"LogisticRegression_Precision_ROC.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"6287197668","text":"__author__ = 'arya'\nfrom django.conf.urls import patterns, url\nfrom iisc import views\n\n\nurlpatterns = patterns('',\n #url(r'^profs/(\\d+)/$', views.ProfessorRaw, name='register'),\n url(r'^fn/(\\d+)/$', views.GetObjectFromFiducial, name='fn'),\n url(r'^fn/(\\w+)/$', views.GetObjectFromFiducial, {'string': True}, name='fn_str'),\n url(r'^fn/(?P\\d+)/rate/(?P\\d+)/(?P\\d+)/$', views.rate, name='rate'),\n url(r'^fn/(?P\\w+)/rate/(?P\\d+)/(?P\\d+)/$', views.rate,{'string': True}, name='rate_str'),\n url(r'^register/$', views.register, name='register'),\n url(r'^fn/(?P\\d+)/save/(?P\\d+)/$', views.save_fid, name='save'),\n url(r'^fn/(?P\\w+)/save/(?P\\d+)/$', views.save_fid,{'string': True}, name='save_str'),\n)\n","repo_name":"misingnoglic/iisc_app","sub_path":"iisc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"39251275419","text":"# Libraries:\nimport pandas as pd\nimport numpy as np\nimport requests\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas_ta as ta\nfrom datetime import date, datetime\nfrom functools import reduce\n\n\n# Data Info:\nhost = 'https://gist.githubusercontent.com/FabianTriana/'\nkey_quotes = 'b613ab453e7ac8a50b2561d48c2e9838'\nkey_fundamentals = 'dabd6fc124a0e6912d963365aaf69544'\nkey_non_publicly_traded_shares = 'dd9a9e2c22c55e1261980f45a9e5a324'\nfile_quotes = '/raw/infobvc_data.csv'\nfile_fundamentals = '/raw/infobvc_fundamental_data.csv'\nfile_non_publicly_traded_shares = '/raw/non_publicly_traded_shares.csv'\nurl_quotes = host + key_quotes + file_quotes\nurl_fundamentals = host + key_fundamentals + file_fundamentals\nurl_non_publicly_traded_shares = host + key_non_publicly_traded_shares + file_non_publicly_traded_shares\n\ndf_quotes = pd.read_csv(url_quotes)\ndf_quotes['date'] = pd.to_datetime(df_quotes['date'])\ndf_fundamentals = pd.read_csv(url_fundamentals)\ndf_non_publicly_traded_shares = pd.read_csv(url_non_publicly_traded_shares)\n\ndict_data = {'quotes': df_quotes, 'fundamentals': df_fundamentals}\n\n\n# Default Values:\nminimum_date = str(date(date.today().year-1, 1, 1))\nmaximum_date = str(date.today())\n\n\n\n# Available Tickers:\nvalid_tickers = list(pd.read_csv(url_quotes)['ticker'].unique())+['*']\n\n\n\n# Valid aggregation periods:\nvalid_aggregation_periods = ['D', 'M', 'Q', 'Y']\n\n\n\n\n# Functions:\n# Get Quotes Function:\ndef get_quotes(tickers: list = ['*'], min_date: str = minimum_date, max_date: str = maximum_date, agg_period: str = 'd'):\n # Docstring:\n '''Get data of prices, quantities and volumes for stocks, period and aggregation period selected.\n\n Parameters\n ----------\n tickers : list\n List containing the tickers. Default value is ['*'] which means all available tickers.\n min_date: str\n Minimum date. It must have `%Y-%m-%d` format.\n max_date: str\n Maximum date. It must have `%Y-%m-%d` format.\n agg_period: str\n Aggregation period. The only valid values are `d` (day), `m` (month), `q` (quarter) and `y` (year).\n\n Returns\n ----------\n df : pandas.DataFrame\n '''\n # Capitalize all tickers selected:\n tickers = [the_tickers.upper() for the_tickers in tickers]\n\n # Uppercase for period selected:\n agg_period = agg_period.upper()\n\n # Check that all tickers selected are valid:\n not_valid_tickers = np.setdiff1d(tickers, valid_tickers)\n if len(not_valid_tickers) > 0:\n raise Exception('Error. {} are not valid tickers'.format(str(not_valid_tickers)))\n\n # Check that aggregation period is valid:\n if agg_period not in valid_aggregation_periods:\n raise Exception('Error. {} is not a valid aggregation period. Valid aggregation periods are {}'.format(agg_period, str(valid_aggregation_periods)))\n\n # Check that minimum date is a valid date:\n try:\n datetime.strptime(min_date, '%Y-%m-%d')\n except:\n raise Exception('''{} is not a valid date. Date must have '%Y-%m-%d' format'''.format(str(min_date)))\n\n # Check that maximum date is a valid date:\n try:\n datetime.strptime(max_date, '%Y-%m-%d')\n except:\n raise Exception('''{} is not a valid date. 
Date must have '%Y-%m-%d' format'''.format(str(max_date)))\n\n\n\n # Process Data:\n # Check if user has selected all tickers or only specific ones:\n if '*' in tickers:\n ticker_selection = valid_tickers\n else:\n ticker_selection = tickers\n min_date_filter = pd.to_datetime(df_quotes['date']) >= min_date\n max_date_filter = pd.to_datetime(df_quotes['date']) <= max_date\n ticker_filter = df_quotes['ticker'].isin(ticker_selection)\n df_filtered = df_quotes[min_date_filter & max_date_filter & ticker_filter].sort_values(by = ['ticker', 'date'], ascending = [True, False]).reset_index(drop = True)\n\n # Process according to aggregation period:\n if agg_period in ['D']:\n df = df_filtered\n elif agg_period in ['M', 'Q', 'Y']:\n df = df_filtered.groupby(['ticker', 'issuer', df_filtered['date'].dt.to_period(agg_period)], as_index = True).agg({'close': 'last', 'quantity': 'sum', 'open': 'first', 'low': 'min', 'high': 'max'}).reset_index().astype({'date': str})\n else:\n df = pd.DataFrame({})\n\n # Return:\n return df\n\n\n\n# Get Fundamentals Function:\ndef get_fundamentals(tickers: list = ['*'], ratios: bool = False):\n # Docstring:\n '''Get fundamental data for tickers selected.\n\n Parameters\n ----------\n ticker : list\n List containing the tickers. Default value is ['*'] which means all available tickers.\n ratios : bool\n If True then fundamental ratios (P/E, P/B and Yield) are also returned. If False then fundamental ratios are not returned.\n\n Returns\n ----------\n df : pandas.DataFrame\n '''\n # Capitalize all tickers selected:\n tickers = [the_tickers.upper() for the_tickers in tickers]\n df_quotes = dict_data['quotes']\n\n # Check that all tickers selected are valid:\n not_valid_tickers = np.setdiff1d(tickers, valid_tickers)\n if len(not_valid_tickers) > 0:\n raise Exception('Error. 
{} are not valid tickers'.format(str(not_valid_tickers)))\n\n\n\n # Process Data:\n # Check if user has selected all tickers or only specific ones:\n if '*' in tickers:\n ticker_selection = valid_tickers\n else:\n ticker_selection = tickers\n ticker_filter = df_fundamentals['ticker'].isin(ticker_selection)\n df_filtered = df_fundamentals[ticker_filter].reset_index(drop = True)\n df = df_filtered\n\n # Check if user has selected ratios:\n if ratios == True:\n df_quotes = df_quotes[df_quotes['close'].notna()]\n df_quotes = df_quotes.sort_values(by = ['date', 'ticker']).reset_index(drop = True)\n df_quotes_last = df_quotes.groupby(['issuer', 'ticker'], as_index = False).last()[['issuer', 'ticker', 'close']]\n df_quotes_last_selected = df_quotes_last[df_quotes_last['ticker'].isin(ticker_selection)]\n issuers = list(df['issuer'].unique())\n df = df[['ticker', 'dividend', 'sector', 'shares']].merge(df_quotes_last_selected, how = 'left', on = 'ticker')\n df['yield'] = df['dividend'] / df['close']\n\n\n # Get Data at Issuer Level:\n df_issuer = df_fundamentals[df_fundamentals['issuer'].isin(issuers)].reset_index(drop = True)\n # Correct for Davivienda NON PUBLICLY TRADED stocks:\n df_issuer['shares'] = np.where(df_issuer['issuer'] == 'BANCO DAVIVIENDA', df_issuer['shares'] + df_non_publicly_traded_shares[df_non_publicly_traded_shares['issuer'] == 'BANCO DAVIVIENDA']['shares'].unique()[0], df_issuer['shares'])\n\n df_issuer_group = df_issuer.groupby('issuer', as_index = False).agg({'shares': 'sum', 'sales': 'mean', 'net_income': 'mean', 'equity': 'mean', 'assets': 'mean'})\n df_issuer_group['book_value'] = df_issuer_group['equity'] / df_issuer_group['shares']\n df_issuer_group['eps'] = df_issuer_group['net_income'] / df_issuer_group['shares']\n df_issuer_group = df_issuer_group.drop(columns = ['shares'])\n\n # Get Issuer Capitalization:\n df_prices_issuer_cap = df_quotes_last[df_quotes_last['issuer'].isin(issuers)]\n df_prices_issuer_cap = df_prices_issuer_cap.merge(df_issuer[['ticker', 'issuer', 'shares']], how = 'left', on = ['ticker', 'issuer'])\n df_prices_issuer_cap['partial_cap'] = df_prices_issuer_cap['shares']*df_prices_issuer_cap['close']\n df_prices_issuer_cap_group = df_prices_issuer_cap.groupby('issuer', as_index = False).agg({'partial_cap': 'sum'}).rename(columns = {'partial_cap': 'cap'})\n\n # Merge dataframes:\n df = df.merge(df_issuer_group, how = 'left', on = 'issuer').merge(df_prices_issuer_cap_group, how = 'left', on = 'issuer')\n\n # Get ratios:\n df['price_to_book'] = df['close'] / df['book_value']\n df['price_to_earnings'] = df['close'] / df['eps']\n\n # Columns order:\n df = df[['ticker', 'shares', 'sales', 'net_income', 'equity',\n 'assets', 'dividend', 'cap', 'yield', 'book_value',\n 'eps', 'price_to_book', 'price_to_earnings', 'sector']]\n # Return:\n return df\n\n\n\n# Get Technicals Function:\ndef get_technicals(tickers: list = ['*']):\n # Docstring:\n '''Get technical indicators (SMAs, EMAs, RSI) data for tickers selected.\n\n Parameters\n ----------\n tickers : list\n List containing the tickers. 
Default value is ['*'] which means all available tickers.\n\n    Returns\n    ----------\n    df : pandas.DataFrame\n    '''\n    # Get quotes for the selected tickers (get_quotes validates and capitalizes them):\n    df = get_quotes(tickers)\n    # If close is NaN or non-positive, replace it with the average price:\n    df['close'] = np.where(df['close'].isna() | (df['close'] <= 0), df['average'], df['close'])\n\n    # Drop records where close value is zero:\n    df = df[df['close'] > 0].reset_index(drop = True)\n\n    # Order data by ticker and date:\n    df = df.sort_values(by = ['ticker', 'date']).reset_index(drop = True)\n\n    # Get technical indicators by ticker:\n    # Avoid problems for tickers list containing only one value:\n    dummy_value = ''\n    if len(tickers) < 2:\n        df_dummy = df.copy()\n        df_dummy['ticker'] = df['ticker'] + '_dummy'\n        dummy_value = list(df_dummy['ticker'].unique())[0]\n        df = pd.concat([df, df_dummy], axis = 0).reset_index(drop = True)\n\n    # Simple Moving Averages:\n    df_sma_20 = df.groupby('ticker').apply(lambda x: ta.sma(x['close'], 20)).squeeze().reset_index().drop(columns = ['level_1'])\n    df_sma_50 = df.groupby('ticker').apply(lambda x: ta.sma(x['close'], 50)).squeeze().reset_index().drop(columns = ['level_1', 'ticker'])\n    df_sma_200 = df.groupby('ticker').apply(lambda x: ta.sma(x['close'], 200)).squeeze().reset_index().drop(columns = ['level_1', 'ticker'])\n\n    # Exponential Moving Averages:\n    df_ema_20 = df.groupby('ticker').apply(lambda x: ta.ema(x['close'], 20)).squeeze().reset_index().drop(columns = ['level_1', 'ticker'])\n    df_ema_50 = df.groupby('ticker').apply(lambda x: ta.ema(x['close'], 50)).squeeze().reset_index().drop(columns = ['level_1', 'ticker'])\n    df_ema_200 = df.groupby('ticker').apply(lambda x: ta.ema(x['close'], 200)).squeeze().reset_index().drop(columns = ['level_1', 'ticker'])\n\n    # Relative Strength Index:\n    df_rsi = df.groupby('ticker').apply(lambda x: ta.rsi(x['close'])).squeeze().reset_index().drop(columns = ['level_1', 'ticker'])\n\n    # Merge Dataframes:\n    dfs_technicals = [df_sma_20, df_sma_50, df_sma_200, df_ema_20, df_ema_50, df_ema_200, df_rsi]\n\n    df_technical = reduce(lambda left, right: pd.concat([left, right], axis = 1), dfs_technicals)\n\n    # Drop dummy values:\n    df_technical = df_technical[~df_technical['ticker'].str.contains('_dummy')].reset_index(drop = True)\n\n    # Concat with dataframe:\n    df = df[['date', 'ticker', 'issuer', 'close']]\n    df = df[~df['ticker'].str.contains('_dummy')].reset_index(drop = True)\n    df = pd.concat([df, df_technical], axis = 1)\n\n    # Return:\n    return df\n\n\n\n# Plot Prices Function:\ndef plot_prices(tickers: list = ['*'], min_date: str = minimum_date, max_date: str = maximum_date, agg_period: str = 'd'):\n    # Docstring:\n    '''Plot data of prices for stocks, period and aggregation period selected.\n\n    Parameters\n    ----------\n    tickers : list\n        List containing the tickers. Default value is ['*'] which means all available tickers.\n    min_date: str\n        Minimum date. It must have `%Y-%m-%d` format.\n    max_date: str\n        Maximum date. It must have `%Y-%m-%d` format.\n    agg_period: str\n        Aggregation period. 
The only valid values are `d` (day), `m` (month), `q` (quarter) and `y` (year).\n\n    Returns\n    ----------\n    plot : seaborn.axisgrid.FacetGrid\n    '''\n    df = get_quotes(tickers, min_date, max_date, agg_period)\n    # If aggregation period is daily and close is NaN or non-positive, replace it with the average price:\n    if agg_period.lower() == 'd':\n        df['close'] = np.where(df['close'].isna() | (df['close'] <= 0), df['average'], df['close'])\n\n    # Drop records where close value is zero:\n    df = df[df['close'] > 0].reset_index(drop = True)\n    plot = sns.relplot(data = df, x = 'date', y = 'close', row = 'ticker', kind = 'line', facet_kws={'sharey': False, 'sharex': True}, aspect = 2)\n    return plot\n\n\n\n# Comparative Plot Function:\ndef plot_comparative_prices(tickers: list = ['*'], min_date: str = minimum_date, max_date: str = maximum_date, agg_period: str = 'd'):\n    # Docstring:\n    '''Comparative plot of prices for stocks, period and aggregation period selected. Initial value is equivalent to 1 for every stock.\n\n    Parameters\n    ----------\n    tickers : list\n        List containing the tickers. Default value is ['*'] which means all available tickers.\n    min_date: str\n        Minimum date. It must have `%Y-%m-%d` format.\n    max_date: str\n        Maximum date. It must have `%Y-%m-%d` format.\n    agg_period: str\n        Aggregation period. The only valid values are `d` (day), `m` (month), `q` (quarter) and `y` (year).\n\n    Returns\n    ----------\n    plot : seaborn.axisgrid.FacetGrid\n    '''\n\n    df = get_quotes(tickers, min_date, max_date, agg_period)\n\n    # If aggregation period is daily and close is NaN or non-positive, replace it with the average price:\n    if agg_period.lower() == 'd':\n        df['close'] = np.where(df['close'].isna() | (df['close'] <= 0), df['average'], df['close'])\n\n    # Sort by date:\n    df = df.sort_values(by = 'date').reset_index(drop = True)\n\n    # Group by ticker and get initial value:\n    df_group = df.groupby('ticker', as_index = False)['close'].first().rename(columns = {'close': 'initial_value'})\n\n    # Merge with full dataframe:\n    df = df.merge(df_group, how = 'left', on = 'ticker')\n\n    # Get Relative Price:\n    df['relative_price'] = df['close'] / df['initial_value']\n\n    # Plot Relative Price:\n    plot = sns.relplot(data = df, x = 'date', y = 'relative_price', hue = 'ticker', kind = 'line', aspect = 2)\n    return plot\n\n\n\n# Correlation Matrix Function:\ndef correlation_matrix(tickers: list = ['*'], min_date: str = minimum_date, max_date: str = maximum_date, agg_period: str = 'd', as_plot: bool = False, annot: bool = False, x_size : int = 10, y_size : int = 10):\n    # Docstring:\n    '''Correlation matrix of close prices for stocks, period and aggregation period selected.\n\n    Parameters\n    ----------\n    tickers : list\n        List containing the tickers. Default value is ['*'] which means all available tickers.\n    min_date: str\n        Minimum date. It must have `%Y-%m-%d` format.\n    max_date: str\n        Maximum date. It must have `%Y-%m-%d` format.\n    agg_period: str\n        Aggregation period. The only valid values are `d` (day), `m` (month), `q` (quarter) and `y` (year).\n    as_plot: bool\n        If True then correlation matrix is returned as a plot. If False then correlation matrix is returned as a DataFrame. Default is False.\n    annot: bool\n        If True then correlation values are displayed on the plot. It is only useful when as_plot is True.\n    x_size: int\n        Width. It is only useful when as_plot is True.\n    y_size: int\n        Height. 
It is only useful when as_plot is True.\n\n    Returns\n    ----------\n    df: pandas.DataFrame\n    or\n    plot : seaborn.axisgrid.FacetGrid\n    '''\n    df = get_quotes(tickers, min_date, max_date, agg_period)\n\n    # If aggregation period is daily and close is NaN or non-positive, replace it with the average price:\n    if agg_period.lower() == 'd':\n        df['close'] = np.where(df['close'].isna() | (df['close'] <= 0), df['average'], df['close'])\n\n    # Get Correlation Matrix:\n    df_corr = df.pivot_table(index = 'date', columns = 'ticker', values = 'close', aggfunc = 'mean').corr()\n    result = df_corr\n\n    # If as_plot is True then return correlation matrix plot:\n    if as_plot:\n        fig, ax = plt.subplots(figsize = (x_size, y_size))\n        sns.heatmap(df_corr, vmin = -1, vmax = 1, cmap = 'RdYlGn', center = 0, annot = annot, ax = ax)\n        ax.set(title = 'Correlation Matrix')\n        result = ax\n\n    # Return:\n    return result\n","repo_name":"FabianTriana/infobvc","sub_path":"src/infobvc/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":15508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
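# A minimal usage sketch for the infobvc module above (illustrative, not from the record):
# it assumes the file is importable as `infobvc.core` and that 'ECOPETROL' appears in the
# remote quotes CSV; any listed ticker works.
#
#     from infobvc import core
#     monthly = core.get_quotes(['ECOPETROL'], '2022-01-01', '2022-06-30', 'm')  # monthly bars
#     corr = core.correlation_matrix(['*'], agg_period='q')  # quarterly close-price correlations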
+{"seq_id":"28799686786","text":"import models\r\nimport torchvision.models as tvmodels\r\n\r\n\r\ndef build_model(cfg):\r\n args = cfg.copy()\r\n name = args.pop('type')\r\n if hasattr(tvmodels, name):\r\n model = getattr(tvmodels, name)(**args)\r\n else:\r\n model = models.__dict__[name](**args)\r\n return model\r\n","repo_name":"xyupeng/LC-Booster","sub_path":"models/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"27"}
+{"seq_id":"3774926137","text":"from datetime import datetime\nfrom recipe import Recipe\n\nclass Book:\n\tdef __init__(self, name: str) -> None:\n\t\tif type(name) != str: raise TypeError('Name should be a string')\n\t\tself.name = name\n\t\tself.creation_date = datetime.now()\n\t\tself.last_update = self.creation_date\n\t\tself.recipes_list = {'starter': [], 'lunch': [], 'dessert': []}\n\n\n\tdef get_recipe_by_name(self, name: str) -> Recipe:\n\t\t\"\"\"\n\t\tPrints a recipe with the name text{name} and returns the instance\n\t\t\"\"\"\n\t\tif type(name) != str: raise TypeError('Name should be a string when searching for a recipe by name')\n\t\tfor type_list in self.recipes_list.values():\n\t\t\tfor recipe in type_list:\n\t\t\t\tif recipe.name == name:\n\t\t\t\t\tprint(recipe)\n\t\t\t\t\treturn recipe\n\t\tprint(f'Recipe not found: \"{name}\"')\n\t\treturn None\n\n\n\tdef get_recipes_by_types(self, recipe_type: str) -> list:\n\t\t\"\"\"\n\t\tGet all recipe names for a given recipe_type\n\t\t\"\"\"\n\t\tif type(recipe_type) != str: raise TypeError('Type should be a string when searching for a recipe by type')\n\t\tif recipe_type not in ['starter', 'lunch', 'dessert']: raise ValueError('Recipe type should be either starter, lunch or dessert')\n\t\treturn self.recipes_list.get(recipe_type)\n\t\t\n\n\n\tdef add_recipe(self, recipe: Recipe) -> None:\n\t\t\"\"\"\n\t\tAdd a recipe to the book and update last_update\n\t\t\"\"\"\n\t\tif type(recipe) != Recipe: raise TypeError('Recipe type should be from Recipe class type when adding a recipe')\n\t\tif recipe in self.recipes_list.get(recipe.recipe_type):\n\t\t\tif input('Recipe already in book, do you want to overwrite it ? (n to cancel)') == 'n': return\n\t\tself.recipes_list.get(recipe.recipe_type).append(recipe)\n\t\tself.last_update = datetime.now()\n\n","repo_name":"AntoineA67/42-ai-bootcamp","sub_path":"01/ex00/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"17645757702","text":"import sys\n\n# Runs in linear time, based on number of characters in file which are read one by one\n# This method is used in the tokenize method to obtain the next token in the giveen file\ndef get_next_token(file):\n token = ''\n\n while True:\n c = file.read(1)\n if len(c) == 0:\n if len(token) > 0:\n return token\n else:\n return None\n \n if c.isalnum():\n token = token + c.lower()\n elif len(token) > 0:\n return token\n\n\n# Runs in linear time, based on number of characters in file which are read one by one with get_next_token meethod \ndef tokenize(file_name):\n tokens = []\n with open(file_name, encoding='utf-8') as file:\n while True:\n token = get_next_token(file)\n if token != None:\n tokens.append(token)\n else:\n return tokens\n\n\n# Runs in linear time, as the tokenize function is used in this function, which goes through one character at a time in the given files\ndef common_tokens (file1, file2):\n file1_tokens = tokenize(file1)\n file2_tokens = tokenize(file2)\n \n common_set = set(file1_tokens).intersection(set(file2_tokens))\n \n print(len(common_set))\n\n\nif __name__ == \"__main__\":\n \n common_tokens(sys.argv[1],sys.argv[2])\n \n","repo_name":"swarnamohan/File-Tokenizer","sub_path":"PartB.py","file_name":"PartB.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"38382714844","text":"from django.urls import path\n\napp_name = 'loja_app'\n\nfrom loja_app import views\n\nurlpatterns = [\n path('cliente/', views.ClienteList.as_view(), name='cliente'),\n path('cliente/cadastro/', views.ClienteCreate.as_view(), name='create_cliente'),\n path('cliente/editar//', views.ClienteUpdate.as_view(), name='edit_cliente'),\n path('cliente/detail//', views.ClienteDetail.as_view(), name='detail_cliente'),\n path('cliente/delete//', views.ClienteDelete.as_view(), name='delete_cliente'),\n]","repo_name":"eduardomoraespy/classe_base_view_django","sub_path":"loja_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36458767663","text":"import sprites as s\nimport items as i\nimport pygame\nimport math\n\n\nclass inventory:\n\n inventory_size = 25\n inventory_item_ids = [\"e\" for i in range(inventory_size)]\n inventory_item_amounts = [0 for i in range(inventory_size)]\n\n inventory_currsor_x = 0\n inventory_currsor_y = 0\n inventory_currsor_pos = None\n is_inventory_open = False\n\n # item in cursors slot\n pick_up_item_id = \"e\"\n pick_up_item_amount = 0\n\n # pos of hotbar_cursor\n hotbar_currsor = 0\n\n def __init__(self, surface, items):\n self.surface = surface\n self.items = items\n\n def add_item(self, item_id, amount):\n for i in range(0, self.inventory_size):\n if self.inventory_item_ids[i] == item_id:\n if self.inventory_item_amounts[i] < self.items.get_item_by_id(item_id).max_stack:\n self.inventory_item_amounts[i] += 1\n return None\n for i in range(0, self.inventory_size):\n if self.inventory_item_ids[i] == \"e\":\n self.inventory_item_ids[i] = item_id\n self.inventory_item_amounts[i] = 1\n return None\n \n def remove_item(self, x, y, amount):\n i = (y*5)+x\n self.inventory_item_amounts[i] -= amount\n if self.inventory_item_amounts[i] <= 0:\n self.inventory_item_amounts[i] = 0\n self.inventory_item_ids[i] = \"e\"\n\n # pick up or release item from or in currsor slot\n def pick_up_item(self):\n if self.is_inventory_open:\n if self.inventory_currsor_pos == \"i\":\n i = (self.inventory_currsor_y*5) + self.inventory_currsor_x+5\n elif self.inventory_currsor_pos == \"h\":\n i = self.inventory_currsor_x\n if self.inventory_currsor_pos != None:\n if self.inventory_item_ids[i] == self.pick_up_item_id and self.pick_up_item_id != \"e\":\n self.inventory_item_amounts[i] += self.pick_up_item_amount\n ms = self.items.get_item_by_id(self.pick_up_item_id).max_stack\n if self.inventory_item_amounts[i] > ms:\n self.pick_up_item_amount = self.inventory_item_amounts[i] - ms\n self.inventory_item_amounts[i] = ms\n else:\n self.pick_up_item_id = \"e\"\n self.pick_up_item_amount = 0\n else:\n self.inventory_item_ids[i], self.pick_up_item_id = self.pick_up_item_id, self.inventory_item_ids[i]\n self.inventory_item_amounts[i], self.pick_up_item_amount = self.pick_up_item_amount, self.inventory_item_amounts[i]\n\n # gets the cusor pos and x, y\n def update(self):\n if self.is_inventory_open:\n mx, my = pygame.mouse.get_pos()\n if mx >= 1054 and my >= 20 and mx <= 1260 and my <= 185:# inventory\n mx -= 1056\n my -= 22\n self.inventory_currsor_pos = \"i\"\n self.inventory_currsor_x = int(mx/41)\n self.inventory_currsor_y = int(my/41)\n elif mx >= 1054 and my >= 205 and mx <= 1260 and my <= 247:# hotbar\n mx -= 1056\n my -= 207\n self.inventory_currsor_pos = \"h\"\n self.inventory_currsor_x = int(mx/41)\n self.inventory_currsor_y = int(my/41)\n else:\n self.inventory_currsor_pos = None\n\n def draw_inventory(self, x, y):\n s.inventory(x, y, self.surface)\n i = 5\n for yi in range(4):\n for xi in range(5):\n if self.inventory_currsor_pos == \"i\":\n if xi != self.inventory_currsor_x or yi != self.inventory_currsor_y:\n if self.inventory_item_ids[i] != \"e\":\n self.items.get_item_by_id(self.inventory_item_ids[i]).sprite(x+6+(xi*41), y+6+(yi*41), self.surface, self.inventory_item_amounts[i])\n else:\n if self.inventory_item_ids[i] != \"e\":\n self.items.get_item_by_id(self.inventory_item_ids[i]).sprite(x+6+(xi*41), y+6+(yi*41), self.surface, self.inventory_item_amounts[i])\n i += 1\n\n def draw_inventory_currsor(self, x, y):\n if self.inventory_currsor_pos == \"i\":\n x = (self.inventory_currsor_x*41)+x\n 
y = (self.inventory_currsor_y*41)+y\n if self.pick_up_item_id == \"e\":\n s.inventory_currsor_empty(x, y, self.surface)\n i = (self.inventory_currsor_y*5) + self.inventory_currsor_x+5\n if self.inventory_item_ids[i] != \"e\":\n self.items.get_item_by_id(self.inventory_item_ids[i]).sprite(x+6, y+6, self.surface, self.inventory_item_amounts[i])\n else:\n s.inventory_currsor_full(x, y, self.surface)\n self.items.get_item_by_id(self.pick_up_item_id).sprite(x+6, y+6, self.surface, self.pick_up_item_amount)\n\n\n elif self.inventory_currsor_pos == \"h\":\n x = (self.inventory_currsor_x*41)+x\n y = (self.inventory_currsor_y*41)+y+185\n if self.pick_up_item_id == \"e\":\n s.inventory_currsor_empty(x, y, self.surface)\n i = self.inventory_currsor_x\n if self.inventory_item_ids[i] != \"e\":\n self.items.get_item_by_id(self.inventory_item_ids[i]).sprite(x+6, y+6, self.surface, self.inventory_item_amounts[i])\n else:\n s.inventory_currsor_full(x, y, self.surface)\n self.items.get_item_by_id(self.pick_up_item_id).sprite(x+6, y+6, self.surface, self.pick_up_item_amount)\n\n def draw_hotbar(self, x, y):\n s.hotbar(x, y, self.surface)\n for i in range(5):\n if i == self.hotbar_currsor:\n s.hotbar_currsor(x+(41*i), y, self.surface)\n if self.inventory_item_ids[i] != \"e\":\n self.items.get_item_by_id(self.inventory_item_ids[i]).sprite(x+6+(i*41), y+6, self.surface, self.inventory_item_amounts[i])\n \n def draw(self):\n if self.is_inventory_open:\n self.draw_inventory(1054, 20)\n self.draw_hotbar(1054, 205)\n self.draw_inventory_currsor(1054, 20)\n else:\n self.draw_hotbar(1054, 20)\n\nclass player:\n\n screen_pos_x = 0\n screen_pos_y = 0\n\n global_pos_x = 620\n global_pos_y = 40\n\n currsor_pos_x = 0\n currsor_pos_y = 0\n currsor_lenght = 80\n\n bottom_collision_points = [[0, 79], [39, 79]]\n\n right_collision_points = [[39, 0], [39, 40], [39, 79]]\n left_collision_points = [[0, 0], [0, 40], [0, 79]]\n\n top_collision_points = [[0, 0], [39, 0]]\n\n is_moving_right = False\n is_moving_left = False\n horizontal_movement_speed = 10\n\n is_gravity = False\n gravity_speed = 10\n gs = 10\n\n is_jumping = False\n jump_height = 20\n\n is_block_breaking = False\n block_breaking_progress = 0\n block_breaking_hotbar_progress = 0\n block_breaking_x = 0\n block_breaking_y = 0\n\n\n def __init__(self, surface, world):\n self.surface = surface\n self.world = world\n self.items = i.items()\n self.inventory = inventory(surface, self.items)\n self.get_screen_pos()\n self.get_cursor_pos()\n\n # for drawing\n def get_screen_pos(self):\n self.screen_pos_x = self.global_pos_x-self.world.global_screen_pos_x\n self.screen_pos_y = self.global_pos_y-self.world.global_screen_pos_y\n\n # returns the angle between two points\n def get_angle(self, x1, y1, x2, y2):\n x = x2 - x1\n y = -(y2 - y1)\n a = math.atan2(y, x)\n return -a\n\n # dist between two points\n def get_distance(self, x1, y1, x2, y2):\n x = x2 - x1\n y = y2 - y1\n d = math.sqrt((x**2)+(y**2))\n return d\n\n # gets the onscreen currsor pos\n def get_cursor_pos(self):\n mpx, mpy = pygame.mouse.get_pos()\n a = self.get_angle(self.screen_pos_x+20, self.screen_pos_y+20, mpx, mpy)\n d = self.get_distance(self.screen_pos_x+20, self.screen_pos_y+20, mpx, mpy)\n if d >= 80:\n self.currsor_pos_x = (math.cos(a)*self.currsor_lenght)+self.screen_pos_x+20\n self.currsor_pos_y = (math.sin(a)*self.currsor_lenght)+self.screen_pos_y+20\n else:\n self.currsor_pos_x = (math.cos(a)*d)+self.screen_pos_x+20\n self.currsor_pos_y = (math.sin(a)*d)+self.screen_pos_y+20\n\n def 
start_block_breaking(self):\n self.is_block_breaking = True\n x = int((self.world.screen_map_pos_x+self.currsor_pos_x)/40)\n y = int((self.world.screen_map_pos_y+self.currsor_pos_y)/40)\n if self.block_breaking_x != x or self.block_breaking_y != y:\n self.block_breaking_progress = 0\n self.block_breaking_x = x\n self.block_breaking_y = y\n\n def stop_block_breaking(self):\n self.is_block_breaking = False\n self.block_breaking_hotbar_progress = 0\n self.block_breaking_progress = 0\n\n def break_block(self):\n if self.is_block_breaking:\n b = self.world.blocks.get_block_by_id(self.world.loaded_map[self.block_breaking_y][self.block_breaking_x])\n if b.hardness != \"inf\":\n self.block_breaking_hotbar_progress = (self.block_breaking_progress/b.hardness)*100\n if b.hardness <= self.block_breaking_progress:\n d = b.break_dorps\n if d != None:\n self.inventory.add_item(d[0], d[1])\n self.world.loaded_map[self.block_breaking_y][self.block_breaking_x] = self.world.blocks.air.id\n self.world.get_screen_map()\n self.stop_block_breaking()\n self.block_breaking_progress += 1\n else:\n self.stop_block_breaking()\n\n def place_block(self):\n x = int((self.world.screen_map_pos_x+self.currsor_pos_x)/40)\n y = int((self.world.screen_map_pos_y+self.currsor_pos_y)/40)\n i = self.inventory.inventory_item_ids[self.inventory.hotbar_currsor]\n if self.world.loaded_map[y][x] == self.world.blocks.air.id and self.inventory.inventory_currsor_pos == None:\n if i != \"e\":\n if self.inventory.items.get_item_by_id(i).type == \"b\":\n self.world.loaded_map[y][x] = self.world.blocks.dirt.id\n self.world.get_screen_map()\n # checks if dest pos not coliding with player\n if self.is_block_colliding(self.screen_pos_x, self.screen_pos_y, self.bottom_collision_points):\n self.world.loaded_map[y][x] = self.world.blocks.air.id\n self.world.get_screen_map()\n return 0\n if self.is_block_colliding(self.screen_pos_x, self.screen_pos_y, self.top_collision_points):\n self.world.loaded_map[y][x] = self.world.blocks.air.id\n self.world.get_screen_map()\n return 0\n if self.is_block_colliding(self.screen_pos_x, self.screen_pos_y, self.right_collision_points):\n self.world.loaded_map[y][x] = self.world.blocks.air.id\n self.world.get_screen_map()\n return 0\n if self.is_block_colliding(self.screen_pos_x, self.screen_pos_y, self.left_collision_points):\n self.world.loaded_map[y][x] = self.world.blocks.air.id\n self.world.get_screen_map()\n return 0\n self.world.loaded_map[y][x] = self.inventory.items.get_item_by_id(i).type_spesifics.block_id\n self.inventory.remove_item(self.inventory.hotbar_currsor, 0, 1)\n self.world.get_screen_map()\n\n # checks players colision with blocks at x, y\n def is_block_colliding(self, x, y, collision_points):\n collision = False\n for p in collision_points:\n i = self.world.screen_map[int((p[1]+y+self.world.screen_map_draw_offset_y)/40)][int((p[0]+x+self.world.screen_map_draw_offset_x)/40)]\n if self.world.blocks.get_block_by_id(i).is_solid:\n collision = True\n break\n return collision\n\n def move_horizontaly(self):\n if self.is_moving_right:\n if not self.is_block_colliding(self.screen_pos_x+self.horizontal_movement_speed, self.screen_pos_y, self.right_collision_points):\n self.global_pos_x += self.horizontal_movement_speed\n self.world.move_screen_map(self.horizontal_movement_speed, 0)\n self.get_screen_pos()\n else:\n hms = self.horizontal_movement_speed\n while self.is_block_colliding(self.screen_pos_x+hms, self.screen_pos_y, self.right_collision_points):\n hms -= 1\n if hms <= 0:\n hms = 0\n break\n 
self.global_pos_x += hms\n self.world.move_screen_map(hms, 0)\n self.is_moving_right = False\n self.get_screen_pos()\n \n if self.is_moving_left:\n if not self.is_block_colliding(self.screen_pos_x-self.horizontal_movement_speed, self.screen_pos_y, self.left_collision_points):\n self.global_pos_x -= self.horizontal_movement_speed\n self.world.move_screen_map(-self.horizontal_movement_speed, 0)\n self.get_screen_pos()\n else:\n hms = self.horizontal_movement_speed\n while self.is_block_colliding(self.screen_pos_x-hms, self.screen_pos_y, self.left_collision_points):\n hms -= 1\n if hms <= 0:\n hms = 0\n break\n self.global_pos_x -= hms\n self.world.move_screen_map(-hms, 0)\n self.is_moving_left = False\n self.get_screen_pos()\n\n # checks for start of gravity. if so inits grav\n def is_falling(self):\n if not self.is_block_colliding(self.screen_pos_x, self.screen_pos_y+1, self.bottom_collision_points):\n if not self.is_gravity and not self.is_jumping:\n self.is_gravity = True\n self.gs = self.gravity_speed\n \n def gravity(self):\n if self.is_gravity:\n if not self.is_block_colliding(self.screen_pos_x, self.screen_pos_y+self.gs, self.bottom_collision_points):\n self.global_pos_y += self.gs\n if self.screen_pos_y >= 340:\n self.world.move_screen_map(0, self.gs)\n self.get_screen_pos()\n self.gs += 1\n else:\n while self.is_block_colliding(self.screen_pos_x, self.screen_pos_y+self.gs, self.bottom_collision_points):\n self.gs -= 1\n if self.gs <= 0:\n self.gs = 0\n break\n self.global_pos_y += self.gs\n if self.screen_pos_y >= 340:\n self.world.move_screen_map(0, self.gs) \n self.get_screen_pos()\n self.is_gravity = False\n\n def start_jumping(self):\n if self.is_block_colliding(self.screen_pos_x, self.screen_pos_y+1, self.bottom_collision_points):\n self.jh = self.jump_height\n self.is_jumping = True\n\n def jumping(self):\n if self.is_jumping:\n if not self.is_block_colliding(self.screen_pos_x, self.screen_pos_y-self.jh, self.top_collision_points):\n self.global_pos_y -= self.jh\n if self.screen_pos_y <= 340:\n self.world.move_screen_map(0, -self.jh)\n self.get_screen_pos()\n self.jh -= 1\n if self.jh <= 0:\n self.is_jumping = False\n else:\n while self.is_block_colliding(self.screen_pos_x, self.screen_pos_y-self.jh, self.top_collision_points):\n self.jh -= 1\n if self.jh <= 0:\n self.jh = 0\n break\n self.global_pos_y -= self.jh\n if self.screen_pos_y <= 340:\n self.world.move_screen_map(0, self.jh) \n self.get_screen_pos()\n self.is_jumping = False\n\n def update(self):\n self.is_falling()\n self.gravity()\n self.jumping()\n self.move_horizontaly()\n\n self.break_block()\n\n self.inventory.update()\n\n self.get_cursor_pos()\n\n def draw_cursor(self):\n pygame.draw.line(self.surface, (0, 0, 0), [self.screen_pos_x+20, self.screen_pos_y+20], [self.currsor_pos_x, self.currsor_pos_y], 4)\n\n def draw_block_breaking_progress_bar(self):\n s.block_breaking_progress_bar(1160, 680, self.surface, self.block_breaking_hotbar_progress)\n\n def draw(self):\n s.player(self.screen_pos_x, self.screen_pos_y, self.surface)","repo_name":"ruzv/block","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":17129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"1305974018","text":"#-*- encoding: utf-8 -*-\n# Funciones (2) - 04\n \n\n# Esta función recibe un año y dice si es bisiesto y la razón\n# Función\ndef anoBisiesto (nAno):\n\tif nAno % 400 == 0 or (nAno % 100 != 0 and nAno % 4 == 0):\n\t\treturn True\n\telse:\n\t\treturn False\n\n\nprint ('Comparador de años bisiestos')\nprint ('Escriba un año y le diré si es bisiesto')\nnAno = int(input())\n\nif anoBisiesto(nAno):\n\tprint (u'El año %s es bisiesto' % str(nAno))\nelse:\n\tprint (u'El año %s no es bisiesto' % str(nAno))","repo_name":"bienvenidosaez/2asir1415","sub_path":"04 Python/20141119 Clase/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"43462241850","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.sparse import spdiags\n\n\ndef StaggeredMesh2dSquare(n):\n \"\"\"\n \"\"\"\n\n ## Assemble mesh coordinate arrays\n dx = 1.0/n # cell size in x,y\n xf = np.linspace(0., 1., n+1)\n # cell face coordinate vector, 1D\n xc = np.linspace(dx/2., 1.-dx/2, n)\n xb = np.hstack([0, xc, 1]) # cell center coordinate vector incl. boundaries\n Xu, Yu = np.meshgrid(xf, xb) # u-grid coordinate arrays\n Xv, Yv = np.meshgrid(xb, xf) # v-grid coordinate arrays\n Xp, Yp = np.meshgrid(xc, xc) # p-grid coordinate arrays\n Xi, Yi = np.meshgrid(xf, xf) # fd-grid coordinate arrays\n\n ## Plot mesh arrangement\n if (n < 12): # plot mesh if fewer than 12 cells in x,y\n fig1 = plt.figure(117)\n fig1.clf()\n ax = fig1.add_subplot(1, 1, 1)\n ax.plot(Xi, Yi, 'k--', Yi, Xi, 'k--', Xu, Yu, 'b>',\n Xv, Yv, 'r^', Xp, Yp, 'go', Xi, Yi, 'y*')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_title('Illustration of u-, v-, p- and fd-grid')\n fig1.show()\n\n return dx, Xu, Yu, Xv, Yv, Xp, Yp, Xi, Yi\n\n\ndef Hfunctions(n, dx, Re, u, v):\n \"\"\"\n \"\"\"\n\n m = n\n\n ## Compute H1,H2 functions\n uP = np.zeros((m, n))\n vP = np.zeros((m, n))\n uxP = np.zeros((m, n))\n vyP = np.zeros((m, n))\n\n # u,v,ux,vy to P-grid\n uP = 0.5 * (u[1:m+1, 1:n+1] + u[1:m+1, 0:n])\n uxP = 1/dx * (u[:, 1:n+1] - u[:, 0:n])\n vP = 0.5 * (v[1:m+1, 1:n+1] + v[0:m, 1:n+1])\n vyP = 1/dx * (v[1:m+1, :] - v[0:m, :])\n\n # u,v,uy,vx to FD-grid\n uFD = np.zeros((m+1, n+1))\n uyFD = np.zeros((m+1, n+1))\n vFD = np.zeros((m+1, n+1))\n vxFD = np.zeros((m+1, n+1))\n\n uFD[0, :] = u[0, :]\n uFD[1:m+1, :] = 0.5 * (u[2:m+2, :] + u[1:m+1, :])\n uFD[m, :] = u[m+1, :]\n\n vFD[:, 0] = v[:, 0]\n vFD[:, 1:n+1] = 0.5 * (v[:, 2:n+2] + v[:, 1:n+1])\n vFD[:, n] = v[:, n+1]\n\n uyFD[0, :] = 2./dx * (u[1, :] - u[0, :])\n uyFD[1:m, :] = 1./dx * (u[2:m+1, :] - u[1:m, :])\n uyFD[m, :] = 2./dx * (u[m+1, :] - u[m, :])\n\n vxFD[:, 0] = 2./dx * (v[:, 1] - v[:, 0])\n vxFD[:, 1:m] = 1./dx * (v[:, 2:n+1] - v[:, 1:n])\n vxFD[:, n] = 2./dx * (v[:, n+1] - v[:, n])\n\n ## Compute H1\n h1 = np.zeros((m, n+1))\n h1[:, 1:n] = (1./Re * (uxP[1:m+1, 1:n] - uxP[1:m+1, 0:n-1]) # 1/Re ux|_w^e\n - (uP[:, 1:n]**2 - uP[:, 0:n-1]**2)) * 1./dx \\\n + 1./dx * (1.0/Re * (uyFD[1:m+1, 1:n] - uyFD[0:m, 1:n]) # 1/Re uy|_s^n\n - (vFD[1:m+1, 1:n] * uFD[1:m+1, 1:n]\n - vFD[0:m, 1:n] * uFD[0:m, 1:n]))\n\n ## Compute H2\n h2 = np.zeros((m+1, n))\n h2[1:m, :] = (1./Re * (vxFD[1:m, 1:n+1] - vxFD[1:m, 0:n])\n - (uFD[1:m, 1:n+1]*vFD[1:m, 1:n+1]\n - uFD[1:m, 0:n]*vFD[1:m, 0:n])) * 1./dx \\\n + 1./dx * (1./Re * (vyP[1:m, 1:n+1] - vyP[0:m-1, 1:n+1])\n - (vP[1:m, :]**2 - vP[0:m-1, :]**2))\n\n return h1, h2\n\n\ndef LaplaceMatrix(n):\n \"\"\"\n Assemble Laplacian operator matrix\n \"\"\"\n\n ## Coefficient arrays\n D = np.ones((n, n))\n a_w = D.copy() # use copy here to avoid pointer assignment\n a_e = D.copy()\n a_s = D.copy()\n a_n = D.copy()\n a_p = -(a_w + a_e + a_s + a_n)\n\n ## Impose boundary conditions and compute source array from BC\n # homogenous Neumann Px = 0 on east and west, Py = 0 on north and south\n # with CDS ghost point approach\n\n # west\n a_p[:, 0] = a_p[:, 0] + a_w[:, 0]\n a_w[:, 0] = 0\n\n # east\n a_p[:, n-1] = a_p[:, n-1] + a_e[:, n-1]\n a_e[:, n-1] = 0\n\n # south\n a_p[0, :] = a_p[0, :] + a_s[0, :]\n a_s[0, :] = 0\n\n # north\n a_p[n-1, :] = a_p[n-1, :] + a_n[n-1, :]\n a_n[n-1, :] = 0\n\n ## Assemble system matrix\n offsets = np.array([-n, -1, 0, 1, n])\n data = np.hstack([\n 
np.vstack([a_w.reshape(n**2, 1, order='F')[n:n**2], np.zeros((n, 1))]),\n np.vstack([a_s.reshape(n**2, 1, order='F')[1:n**2], np.zeros((1, 1))]),\n a_p.reshape(n**2, 1, order='F')[0:n**2],\n np.vstack([np.zeros((1, 1)),\n a_n.reshape(n**2, 1, order='F')[0:n**2-1]]),\n np.vstack([np.zeros((n, 1)),\n a_e.reshape(n**2, 1, order='F')[0:n**2-n]])\n ])\n return spdiags(data.T, offsets, n**2, n**2, format=\"csc\")\n\n\ndef Source(n, dx, H1, H2):\n \"\"\"\n Generate source array\n Note that NS2dHfunctions returns H1 and H2\n such that H1w = H1e = H2s = H2n = 0 (BC cv's)\n \"\"\"\n return (dx * (H1[:, 1:n+1] - H1[:, 0:n] + H2[1:n+1, :] - H2[0:n, :]))\n\n\ndef postprocess(n, u, v, P):\n\n ## Coordinate arrays\n dx, Xu, Yu, Xv, Yv, Xp, Yp, Xi, Yi = StaggeredMesh2dSquare(n)\n\n ## Compute vorticity on FD-grid\n uyFD = 1./dx*(u[1:n+2, :] - u[0:n+1, :])\n vxFD = 1./dx*(v[:, 1:n+2] - v[:, 0:n+1])\n omega = vxFD - uyFD\n\n ## Extrapolate pressure field to walls\n Xpp, Ypp = np.meshgrid(Xv[0, :], Yu[:, 0])\n PP = np.zeros((n+2, n+2))\n PP[1:n+1, 1:n+1] = P.copy()\n\n # 2nd order Neumann:\n PP[1:n+1, 0] = P[:, 0]\n\n # 2nd order Neumann:\n PP[1:n+1, n+1] = P[:, n-1]\n\n # 2nd order Neumann:\n PP[0, 1:n+1] = P[0, :]\n\n # 2nd order Neumann:\n PP[n+1, 1:n+1] = P[n-1, :]\n\n return Xpp, Ypp, PP, omega\n","repo_name":"spietz/2DNSSolver","sub_path":"my_functions.py","file_name":"my_functions.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"}
+{"seq_id":"38169682987","text":"import pdb\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n#===============================================================================\n#============= CONTINUOUS SPACE VERSIONS ===========================\n#===============================================================================\n\ndef local_lipschitz_estimate(f,x,mode=1,eps = None, tol = 1e-12, maxit = 1e6,\n patience = 3, log_interval = 10, verbose = False):\n \"\"\"\n Compute one-sided lipschitz estimate for GSENN model. Adequate for local\n Lipschitz, for global must have the two sided version. This computes:\n\n max_z || f(x) - f(z)|| / || x - z||\n\n Instead of:\n\n max_z1,z2 || f(z1) - f(z2)|| / || z1 - z2||\n\n If eps provided, does local lipzshitz in ball of radius eps.\n\n Mode 1: max_z || f(x) - f(z)|| / || x - z|| , with f = theta\n Mode 2: max_z || f(x) - f(z)|| / || g(x) - g(z)||, with f = theta, g = h\n\n \"\"\"\n norm_lambda = 1e1\n f.eval()\n\n cuda = x.is_cuda\n\n x = Variable(x.data, requires_grad = False)\n if cuda:\n x = x.cuda()\n if eps is not None:\n # Start close to x!\n noise_vec = eps*torch.randn(x.size())\n if cuda:\n noise_vec = noise_vec.cuda()\n z = Variable(x.data.clone() + noise_vec, requires_grad = True)\n if mode == 1:\n progress_string = \"\\rStep: {:8}/{:8} Loss:{:8.2f} L:{:5.2f} ||x-z||:{:8.2f} Improv.:{:6.2f}\"\n else:\n progress_string = \"\\rStep: {:8}/{:8} Loss:{:8.2f} L:{:5.2f} ||gx-gz||:{:8.2f} Improv.:{:6.2f}\"\n else:\n z = Variable(torch.randn(x.size()), requires_grad = True)\n if cuda:\n z = z.cuda()\n progress_string = \"\\rStep: {:8}/{:8} L:{:5.2f} Improv.:{:6.2f}\"\n\n\n if mode == 1:\n # fx = f(x).detach()\n # fz = f(z)\n _ = f(x)\n fx = f.thetas.detach()\n _ = f(z)\n fz = f.thetas\n else:\n _ = f(x)\n fx = f.thetas.detach()\n gx = f.concepts.detach()\n _ = f(z)\n fz = f.thetas\n gz = f.concepts\n\n optim = torch.optim.SGD([z], lr=0.01)\n\n i = 0\n improvements = [tol*2]\n prev_lip = 0 #(((y_hat - y).norm())/((x-z).norm())).data[0]\n prev_loss = 0 #\n\n while True:\n i += 1\n optim.zero_grad()\n if mode == 1:\n _ = f(z)\n fz = f.thetas\n dist_f = (fz - fx).norm()\n dist_x = ( z - x).norm()\n loss = dist_x/dist_f # Want to maximize d_f/d_x (reciprocal)\n else:\n _ = f(z)\n fz = f.thetas\n gz = f.concepts\n dist_f = (fz - fx).norm()\n dist_g = (gz - gx).norm()\n loss = dist_g/dist_f # Want to maximize d_f/d_g\n\n lip = 1/loss.data[0]\n\n # Introduce ball constraint with lagrangean\n if eps is not None:\n #ball_loss = F.relu(dist_g - eps)\n if mode == 1:\n dist = dist_x.data[0]\n loss += norm_lambda*F.relu(dist_x - eps)\n else:\n dist = dist_g.data[0]\n loss += norm_lambda*F.relu(dist_g - eps)\n\n loss.backward()\n optim.step()\n\n # a last correction...\n #input_param.data.clamp_(0, 1)\n\n improvements.append(prev_loss - loss.data[0])\n prev_loss = loss.data[0]\n #print()\n #print(improvements[-1])\n\n if i % log_interval == 0:\n if eps is not None:\n prog_list = [i, maxit, loss.data[0], lip, dist, improvements[-1]]\n else:\n prog_list = [i, maxit, lip, improvements[-1]]\n\n print(progress_string.format(*prog_list), end = '')\n\n if (i > 10) and (max(improvements[-patience:]) < - tol):\n # Best improvement is negative and below tol threshold, i.e. 
all prev k steps wrosening > tol\n #print()\n #print(improvements[-patience:])\n if verbose: print('\\nReached stop condition: improvement stalled for {} iters.'.format(patience))\n break\n if (i > maxit):\n if verbose: print('\\nReached stop condition: maximum number of iterations ({}).'.format(maxit))\n break\n\n print()\n print('Estimated Lipschitz constant: {:8.2f}'.format(lip))\n if eps is not None and verbose:\n if mode == 1:\n print('|| x - z || = {:8.2f} < {:8.2f}'.format((z-x).norm().data[0], eps))\n else:\n print('|| g(x) - g(z) || = {:8.2f} < {:8.2f}'.format((gz-gx).norm().data[0], eps))\n\n return lip, z.data\n\ndef estimate_dataset_lipschitz(model, dataloader, continuous=True, mode = 1, eps = 1, tol = 1e-2, maxpoints = None,\n maxit = 1e5, patience = 3, log_interval = 10, cuda= False, verbose = False):\n \"\"\"\n Continuous and discrete space version.\n\n \"\"\"\n model.eval()\n Lips = []\n # ToDoL Add a parfor here\n \n\n for i, (inputs, targets) in enumerate(dataloader, 0):\n if cuda:\n inputs = inputs.cuda()\n #print(inputs.size())\n #print(asd.asd)\n inputs = Variable(inputs) #targets = Variable(inputs), Variable(targets)\n l,_ = local_lipschitz_estimate(model, inputs, mode = mode, eps=eps, tol=tol,\n maxit=maxit, log_interval=log_interval,\n patience = patience, verbose = verbose)\n Lips.append(l)\n #print('Warning: early break')\n #break\n if maxpoints is not None and i == maxpoints:\n break\n Lips = np.array(Lips)\n return Lips.mean(), Lips\n\n\n#===============================================================================\n#============= EMPIRICAL SAMPLE VERSIONS ===========================\n#===============================================================================\n\ndef pairwise_distances(x, y=None):\n '''\n Input: x is a Nxd matrix\n y is an optional Mxd matirx\n Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]\n if y is not given then use 'y=x'.\n i.e. 
dist[i,j] = ||x[i,:]-y[j,:]||^2\n '''\n x_norm = (x**2).sum(1).view(-1, 1)\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y**2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n\n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n # Ensure diagonal is zero if x=y\n # if y is None:\n # dist = dist - torch.diag(dist.diag)\n return torch.clamp(dist, 0.0, np.inf)\n\n\ndef sample_local_lipschitz(model, dataset, mode = 2, max_distance = None, top_k = 1, cuda = False):\n \"\"\"\n\n For every point in dataset, find pair point y in dataset that maximizes relative variation of model\n\n MODE 1: || th(x) - th(y) ||/||x - y||\n MODE 2: || th(x) - th(y) ||/||h(x) - h(y)||\n\n - dataset: a tds obkect\n - top_k : how many to return\n - max_distance: maximum distance between points to consider (radius)\n\n TODO: Takes matrix of distances to avoid recomputation in every step.\n NO POINT, WE NEED H DISTANCES, not x\n\n \"\"\"\n model.eval()\n\n tol = 1e-10 # To avoid numerical problems\n\n # Create dataloader from tds without shuffle\n dataloader = DataLoader(dataset, batch_size = 128, shuffle=False)\n n = len(dataset) # len(dataset)\n\n Hs = []\n Ts = []\n\n\n for i, (inputs, targets) in enumerate(dataloader):\n # get the inputs\n if cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n input_var = torch.autograd.Variable(inputs, volatile=True)\n target_var = torch.autograd.Variable(targets, volatile=True)\n\n _ = model(input_var)\n Ts.append(model.thetas.squeeze())\n Hs.append(model.concepts.squeeze())\n\n Ts = torch.cat(Ts, dim = 0)\n num_dists = pairwise_distances(Ts) # Numerator\n\n if mode == 1:\n denom_dists = pairwise_distances(dataset)\n if mode == 2:\n Hs = torch.cat(Hs)\n denom_dists = pairwise_distances(Hs)\n\n ratios = torch.Tensor(n,n)\n\n if max_distance is not None:\n # Distances above threshold: make them inf\n #print((denom_dists > max_distance).size())\n nonzero = torch.nonzero((denom_dists > max_distance).data).size(0)\n total = denom_dists.size(0)**2\n print('Number of zero denom distances: {} ({:4.2f}%)'.format(\n total - nonzero, 100*(total-nonzero)/total))\n denom_dists[denom_dists > max_distance] = -1.0 #float('inf')\n # Same with self dists\n denom_dists[denom_dists == 0] = -1.0 #float('inf')\n ratios = (num_dists/denom_dists).data\n argmaxes = {k: [] for k in range(n)}\n vals, inds = ratios.topk(top_k, 1, True, True)\n argmaxes = {i: [(j,v) for (j,v) in zip(inds[i,:],vals[i,:])] for i in range(n)}\n return vals[:,0].numpy(), argmaxes\n\n #\n #\n # n = len(dataset) # len(dataset)\n # ratios = {}\n # for i in tqdm(range(n)):\n # x = Variable(dataset.data_tensor[i,:]).view(1,-1)\n # _ = model(x)\n # Th_x = model.thetas\n # for j in range(n):\n # if i == j: continue\n # y = Variable(dataset.data_tensor[j,:]).view(1,-1)\n # ratio, num, denom = lipschitz_ratio(model, x, y, Th_x = Th_x)\n # if max_distance is not None and denom > max_distance:\n # continue\n # ratios[(i,j)] = ratio.data.numpy()\n # out = []\n # for i, (pair, val) in enumerate(sorted(ratios.items(), key=lambda x: x[1], reverse = True)):\n # out.append((pair, val))\n # if i + 1 == top_k:\n # break\n\n\ndef lipschitz_ratio(model, x, y, Th_x = None, mode = 1):\n \"\"\"\n For two points x,z compute:\n\n MODE 1: || th(x) - th(y) ||/||x - y||\n MODE 2: || th(x) - th(y) ||/||h(x) - h(y)||\n\n If Th_x provided, won't recompute.\n \"\"\"\n cuda = x.is_cuda\n if Th_x is None:\n x = Variable(x.data, requires_grad = False)\n if cuda:\n x = x.cuda()\n _ = model(x)\n Th_x = 
model.thetas\n\n _ = model(y)\n Th_y = model.thetas\n num = (Th_y - Th_x).norm()\n\n if mode == 1:\n denom = ( y - x).norm()\n else:\n h_x = model.concepts\n h_y = model.concepts\n denom = (h_x - h_y).norm()\n\n ratio = num/denom\n return ratio,num,denom\n\ndef find_maximum_lipschitz_dataset(model, dataset, top_k = 1, max_distance = None):\n \"\"\"\n Find pair of points x and y in dataset that maximize relative variation of model\n\n || f(x) - f(x) ||/||x - y||\n\n \"\"\"\n model.eval()\n n = len(dataset) # len(dataset)\n ratios = {}\n for i in range(n):\n x = Variable(dataset.data_tensor[i,:]).view(1,-1)\n fx = model(x)\n for j in range(i+1, n):\n y = Variable(dataset.data_tensor[j,:]).view(1,-1)\n fy = model(y)\n dxy = (x-y).norm().data.numpy()\n if max_distance is not None and dxy > max_distance:\n continue\n ratios[(i,j)] = (fx - fy).norm().data.numpy()/dxy\n out = []\n for i, (pair, val) in enumerate(sorted(ratios.items(), key=lambda x: x[1], reverse = True)):\n out.append((pair, val))\n if i + 1 == top_k:\n break\n return out\n","repo_name":"dmelis/SENN","sub_path":"SENN/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":11558,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"27"}
+{"seq_id":"4783810636","text":"from usched import Sched\nfrom machine import Pin\n\nled1 = Pin(4, Pin.OUT)\nled2 = Pin(13, Pin.OUT)\n\ndef toggle1(objLED, period):\n while True:\n yield period\n objLED.value(not objLED.value())\n\ndef toggle2(objLED, period):\n while True:\n yield period\n objLED.value(not objLED.value())\n\nobjSched = Sched()\nobjSched.add_thread(toggle1(led1, .2))\nobjSched.add_thread(toggle1(led2, 1))\nobjSched.run()\n","repo_name":"db4linq/micropython-dev-kit","sub_path":"tasking.py","file_name":"tasking.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"19802338691","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import Dataset\r\n\r\n\r\n\r\nclass VeggiesDataset(Dataset):\r\n\r\n def __init__(self, images, labels):\r\n\r\n self.images = torch.tensor(np.array(images))\r\n self.labels = torch.tensor(np.array(labels))\r\n\r\n def __len__(self):\r\n return len(self.labels)\r\n\r\n def __getitem__(self, idx):\r\n if torch.is_tensor(idx):\r\n idx = idx.tolist()\r\n \r\n sample = {'image': self.images[idx], 'label': self.labels[idx]}\r\n \r\n return sample\r\n \r\n \r\n \r\nclass ConvNet(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.conv1 = nn.Conv2d(3, 6, 11)\r\n self.pool = nn.MaxPool2d(2, 2)\r\n self.conv2 = nn.Conv2d(6, 8, 9)\r\n self.fc1 = nn.Linear(8 * 6 * 6, 128)\r\n self.fc2 = nn.Linear(128, 64)\r\n self.fc3 = nn.Linear(64, 15)\r\n\r\n def forward(self, x):\r\n \r\n x = x.float()\r\n x = self.pool(F.relu(self.conv1(x)))\r\n x = self.pool(F.relu(self.conv2(x)))\r\n x = torch.flatten(x, 1)\r\n x = F.relu(self.fc1(x))\r\n x = F.relu(self.fc2(x))\r\n x = self.fc3(x)\r\n return x","repo_name":"leow1511/Conv_Classifier_For_Veggetables","sub_path":"class_definition.py","file_name":"class_definition.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42593616551","text":"# coding: utf-8\r\nfrom nltk.corpus import wordnet\r\nfrom collections import defaultdict\r\nimport math\r\nimport re\r\n\r\nto_wordnet_tag = {\r\n 'NN':wordnet.NOUN,\r\n 'JJ':wordnet.ADJ,\r\n 'VB':wordnet.VERB,\r\n 'RB':wordnet.ADV\r\n}\r\n\r\nstopwords = set([\r\n\"i\", \"a\", \"about\", \"an\", \"are\", \"as\", \"at\", \"be\", \"by\", \"for\", \"from\",\r\n\"how\", \"in\", \"is\", \"it\", \"of\", \"on\", \"or\", \"that\", \"the\", \"this\", \"to\",\r\n\"was\", \"what\", \"when\", \"where\", \"who\", \"will\", \"with\", \"the\", \"'s\", \"did\",\r\n\"have\", \"has\", \"had\", \"were\", \"'ll\"\r\n])\r\n#stopwords = set([\"a\", \"the\"])\r\n\r\nfrequency_path = 'resources/word-frequencies_org.txt'\r\n\r\nword_matcher = re.compile('[^0-9,.(=)\\\\[\\]/_`]+$')\r\nnum_matcher = re.compile('\\d+(\\.\\d+)?')\r\ndef is_word(w):\r\n # 数字や記号など、単語でないものはFalse\r\n return word_matcher.match(w) is not None\r\n\r\ndef get_locase_words(spos):\r\n # 単語と品詞タグのタプルの配列から、単語のみ抽出して小文字に変換された配列を返す\r\n return [x[0].lower() for x in spos\r\n if is_word(x[0])]\r\n\r\ndef get_non_words(spos):\r\n return [num_matcher.match(x[0]).group()\r\n for x in spos if num_matcher.match(x[0]) is not None]\r\n\r\ndef len_compress(l):\r\n return math.log(1. + l)\r\n\r\ndef get_lemmatized_words(sa):\r\n # 名詞、形容詞、動詞、副詞については原型に戻す\r\n rez = []\r\n for w, wpos in sa:\r\n w = w.lower()\r\n if w in stopwords or not is_word(w):\r\n continue\r\n wtag = to_wordnet_tag.get(wpos[:2])\r\n if wtag is None:\r\n wlem = w\r\n else:\r\n # 複数形→単数形、過去形→現在形などの変換\r\n # utf-8に一旦変換しないと、wordnetの中でエラー吐く\r\n wlem = wordnet.morphy(w.decode('utf-8'), wtag) or w\r\n rez.append(wlem.decode('utf-8'))\r\n return rez\r\n\r\ndef get_original_form_words(sa):\r\n return sa\r\n\r\ndef load_wweight_table(path):\r\n # 各単語のIDFの辞書を読み込み\r\n lines = open(path).readlines()\r\n wweight = defaultdict(float)\r\n if not len(lines):\r\n return (wweight, 0.)\r\n totfreq = int(lines[0])\r\n for l in lines[1:]:\r\n w, freq = l.split()\r\n freq = float(freq)\r\n if freq < 5:\r\n continue\r\n wweight[w.decode('utf-8')] = math.log(totfreq / freq)\r\n return wweight\r\n\r\n#wweight = load_wweight_table('resources/word-frequencies.txt')\r\nwweight = load_wweight_table(frequency_path)\r\nminwweight = min(wweight.values())\r\n","repo_name":"tmokmss/SemEvalSTS","sub_path":"sts_utilities.py","file_name":"sts_utilities.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"33414430019","text":"# !/usr/bin/env python3\n\"\"\"\n\nThis code illustrates the use of these modules:\n\n date_finder.py\n size_measurement_finder.py\n value_extractor.py\n tnm_stager.py\n\n\"\"\"\n\nimport json\nfrom algorithms.finder.date_finder import run as run_date_finder, DateValue, EMPTY_FIELD as EMPTY_DATE_FIELD\nfrom algorithms.value_extraction.value_extractor import run as run_value_extractor, ValueResult, Value, \\\n EMPTY_FIELD as EMPTY_VALUE_FIELD\nfrom algorithms.finder.size_measurement_finder import run as run_size_measurement, SizeMeasurement, \\\n EMPTY_FIELD as EMPTY_SMF_FIELD\nfrom algorithms.value_extraction.tnm_stage_extractor import run as run_tnm_stager, TNM_FIELDS, TnmCode, \\\n EMPTY_FIELD as EMPTY_TNM_FIELD\nfrom claritynlp_logging import log, ERROR, DEBUG\n\n\n###############################################################################\n#\n# date finder\n#\n###############################################################################\n\nsentences = [\n 'The dates 2012/11/28 and 2012/03/15 are in iso_YYYYMMDD format.',\n 'The dates 30-June 2008, 22DEC78, and 14 MAR 1879 use a string for the month.',\n 'Johannes Brahms was born on May 7, 1833.',\n 'The numbers 2004, 1968, 1492 are individual years.',\n 'The words January, Feb., Sept. and December are individual months.'\n]\n\nlog('\\n\\n***** DATE FINDER EXAMPLES ***** \\n')\n\nfor sentence in sentences:\n\n # scan the sentence for dates\n json_string = run_date_finder(sentence)\n\n # parse the JSON result\n json_data = json.loads(json_string)\n\n # unpack to a list of DateValue namedtuples\n date_results = [DateValue(**m) for m in json_data]\n\n log(' sentence: ' + sentence)\n\n index = 0\n for d in date_results:\n log(' text[{0}]: {1}'.format(index, d.text))\n log(' start[{0}]: {1}'.format(index, d.start))\n log(' end[{0}]: {1}'.format(index, d.end))\n if EMPTY_DATE_FIELD != d.year:\n log(' year[{0}]: {1}'.format(index, d.year))\n if EMPTY_DATE_FIELD != d.month:\n log(' month[{0}]: {1}'.format(index, d.month))\n if EMPTY_DATE_FIELD != d.day:\n log(' day[{0}]: {1}'.format(index, d.day))\n log()\n index += 1\n\n###############################################################################\n#\n# size measurements\n#\n###############################################################################\n\nsentences = [\n 'The cyst measured 1.2 x 1.3 cm.',\n 'The fluid had a volume of 15 cm3.',\n 'The feature was 1.5 cm craniocaudal x 2.2 cm traverse.',\n 'The various features measured 2.3, 1.5, 1.1, and 2.2 cm.',\n 'Today the lesion measures 1.5 x 2 cm; previously it was 1.9cm x 2.3mm.'\n]\n\nlog('\\n\\n***** SIZE MEASUREMENT EXAMPLES ***** \\n')\n\nfor sentence in sentences:\n\n # scan the sentence for size measurements\n json_string = run_size_measurement(sentence)\n\n # parse the JSON result\n json_data = json.loads(json_string)\n\n # unpack to a list of SizeMeasurement namedtuples\n measurements = [SizeMeasurement(**m) for m in json_data]\n\n log(' sentence: ' + sentence)\n\n index = 0\n for m in measurements:\n log(' text[{0}]: {1}'.format(index, m.text))\n log(' start[{0}]: {1}'.format(index, m.start))\n log(' end[{0}]: {1}'.format(index, m.end))\n log('temporality[{0}]: {1}'.format(index, m.temporality))\n log(' units[{0}]: {1}'.format(index, m.units))\n log(' condition[{0}]: {1}'.format(index, m.condition))\n if EMPTY_SMF_FIELD != m.x:\n log(' x[{0}]: {1}'.format(index, m.x))\n if EMPTY_SMF_FIELD != m.y:\n log(' y[{0}]: {1}'.format(index, m.y))\n if EMPTY_SMF_FIELD != m.z:\n log(' z[{0}]: 
{1}'.format(index, m.z))\n if EMPTY_SMF_FIELD != m.values:\n log(' values[{0}]: {1}'.format(index, m.values))\n if EMPTY_SMF_FIELD != m.x_view:\n log(' x_view[{0}]: {1}'.format(index, m.x_view))\n if EMPTY_SMF_FIELD != m.y_view:\n log(' y_view[{0}]: {1}'.format(index, m.y_view))\n if EMPTY_SMF_FIELD != m.z_view:\n log(' z_view[{0}]: {1}'.format(index, m.z_view))\n log()\n index += 1\n\n###############################################################################\n#\n# value extraction\n#\n###############################################################################\n\nsentences = [\n 'Vitals: T: 99 BP: 115/68 P: 79 R:21 O2: 97',\n 'Her BP on 3/27 from her 12 cm x 9 cm x 6 cm heart was 110/70.',\n 'her BP was less than 120/80, his BP was gt 110 /70, BP lt. 110/70',\n 'CTAB Pertinent Results: BLOOD WBC-7.0# RBC-4.02* Hgb-13.4* Hct-38.4* ' + \\\n 'MCV-96 MCH-33.2* MCHC-34.7 RDW-12.9 Plt Ct-172 02:24AM BLOOD WBC-4.4 RBC-4.21*',\n]\n\nlog('\\n\\n***** VALUE EXTRACTION EXAMPLES *****\\n')\n\nsearch_terms = 'T, BP, WBC'\n\n# limits can be either string or int\nminval = 0\nmaxval = 1000\n\nfor sentence in sentences:\n\n # scan the sentence for the desired values\n json_string = run_value_extractor(search_terms, sentence, minval, maxval)\n\n # parse the JSON result\n json_data = json.loads(json_string)\n\n # unpack to a ValueResult namedtuple\n result = ValueResult(**json_data)\n log(' sentence: {0}'.format(result.sentence))\n log(' terms: {0}'.format(result.terms))\n log(' querySuccess: {0}'.format(result.querySuccess))\n log('measurementCount: {0}'.format(result.measurementCount))\n\n # get the array of measurements\n measurements = result.measurementList\n\n # unpack to a list of Value namedtuples\n values = [Value(**m) for m in measurements]\n\n index = 0\n for v in values:\n log(' text[{0}]: {1}'.format(index, v.text))\n log(' start[{0}]: {1}'.format(index, v.start))\n log(' end[{0}]: {1}'.format(index, v.end))\n log(' condition[{0}]: {1}'.format(index, v.condition))\n log(' matchingTerm[{0}]: {1}'.format(index, v.matchingTerm))\n log(' x[{0}]: {1}'.format(index, v.x))\n if EMPTY_VALUE_FIELD != v.y:\n log(' y[{0}]: {1}'.format(index, v.y))\n log()\n index += 1\n\n###############################################################################\n#\n# TNM stager\n#\n###############################################################################\n\nsentences = [\n 'The tumor is classified as pT0pN1M0.',\n 'The tumor is classified as ypT0pN0M0, R0.',\n 'The tumor is classified as pT4bpN1bM0 (stage IIIC).',\n 'The tumor is classified as T4a N1a M1pul L1.',\n 'The tumor is classified as pT1bpN0(1/34) pM1 LYM.',\n 'The tumor is classified as ypT2C4(3) N1(2/16) pM1 G1-2VX L1 Pn0; R0 (liver), R1(cy+) (lung).'\n]\n\nlog('\\n\\n***** TNM STAGING EXAMPLES *****\\n')\n\nfor sentence in sentences:\n\n # scan the sentence for TNM codes\n json_string = run_tnm_stager(sentence)\n\n # parse the JSON result\n json_data = json.loads(json_string)\n\n # unpack to a list of TnmCode namedtuples\n tnm_codes = [TnmCode(**m) for m in json_data]\n\n # find the max length of all the field names:\n maxlen = len(max(TNM_FIELDS, key=len))\n\n log(' sentence: ' + sentence)\n\n index = 0\n for c in tnm_codes:\n\n # for each field in the field list\n for f in TNM_FIELDS:\n\n # get the value of this field for this code c\n attr = getattr(c, f)\n\n # log if field value is meaningful\n if EMPTY_TNM_FIELD != attr:\n indent = ' ' * (maxlen - len(f))\n log('{0}{1}: {2}'.format(indent, f, attr))\n log()\n index += 
1","repo_name":"ClarityNLP/ClarityNLP","sub_path":"nlp/algorithms/value_extraction_wrappers/integration_examples.py","file_name":"integration_examples.py","file_ext":"py","file_size_in_byte":7671,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"27"}
+{"seq_id":"29550713085","text":"class Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n end = len(nums) - 1\n while end != 0:\n if nums[end] > nums[end - 1]:\n break\n end -= 1\n \n if end == 0:\n return nums.sort()\n \n for i, num in reversed(list(enumerate(nums))):\n if nums[i] > nums[end - 1]:\n nums[i], nums[end - 1] = nums[end - 1], nums[i]\n break\n last = len(nums)\n nums[end:] = [nums[i] for i in range(last-1, end-1, -1)]","repo_name":"kungbob/Leetcode_solution","sub_path":"python/31_Next_Permutation.py","file_name":"31_Next_Permutation.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"22873842647","text":"import get_data as gd\nimport os\n### order of data refresh \n\n\n#gd.datascrape\n\ndef check_csv_exists():\n file_path = '/datasets/weblinks.csv'\n return os.path.exists(file_path)\n#\ngd.scrapeexp\n\ngd.combinedata\n\ndef main():\n if not check_csv_exists():\n gd.datascrap.get_mp_info()\n else:\n print('weblinks csv exists moving onto next check')\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"amitl7/mp-expenses","sub_path":"mp_expenses/refresh_data.py","file_name":"refresh_data.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"23528279218","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport json\nimport sys\nimport time\nfrom typing import Any, Dict, List, Optional, Text\n\nimport absl\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nimport tensorflow as tf\n\nfrom tfx import types\nfrom tfx import version\nfrom tfx.types import artifact_utils\nfrom tfx.utils import telemetry_utils\n\n_POLLING_INTERVAL_IN_SECONDS = 30\n\n# TODO(b/139934802) Ensure mirroring of released TFX containers in Docker Hub\n# and gcr.io/tfx-oss-public/ registries.\n_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(version.__version__)\n\n_TF_COMPATIBILITY_OVERRIDE = {\n # Generally, runtimeVersion should be same as . of currently\n # installed tensorflow version, with certain compatibility hacks since\n # some TensorFlow runtime versions are not explicitly supported by\n # CAIP pusher. See:\n # https://cloud.google.com/ai-platform/prediction/docs/runtime-version-list\n '2.0': '1.15',\n # TODO(b/157039850) Remove this once CAIP model support TF 2.2 runtime.\n '2.2': '2.1',\n '2.3': '2.1',\n '2.4': '2.1'\n}\n\n\ndef _get_tf_runtime_version(tf_version: Text) -> Text:\n \"\"\"Returns the tensorflow runtime version used in Cloud AI Platform.\n\n This is only used for prediction service.\n\n Args:\n tf_version: version string returned from `tf.__version__`.\n\n Returns: same major.minor version of installed tensorflow, except when\n overriden by _TF_COMPATIBILITY_OVERRIDE.\n \"\"\"\n tf_version = '.'.join(tf_version.split('.')[0:2])\n return _TF_COMPATIBILITY_OVERRIDE.get(tf_version) or tf_version\n\n\ndef _get_caip_python_version(caip_tf_runtime_version: Text) -> Text:\n \"\"\"Returns supported python version on Cloud AI Platform.\n\n See\n https://cloud.google.com/ml-engine/docs/tensorflow/versioning#set-python-version-training\n\n Args:\n caip_tf_runtime_version: version string returned from\n _get_tf_runtime_version().\n\n Returns:\n '2.7' for PY2. '3.5' or '3.7' for PY3 depending on caip_tf_runtime_version.\n \"\"\"\n if sys.version_info.major == 2:\n return '2.7'\n (major, minor) = caip_tf_runtime_version.split('.')[0:2]\n if (int(major), int(minor)) >= (1, 15):\n return '3.7'\n return '3.5'\n\n\ndef start_aip_training(input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text,\n Any], executor_class_path: Text,\n training_inputs: Dict[Text,\n Any], job_id: Optional[Text]):\n \"\"\"Start a trainer job on AI Platform (AIP).\n\n This is done by forwarding the inputs/outputs/exec_properties to the\n tfx.scripts.run_executor module on a AI Platform training job interpreter.\n\n Args:\n input_dict: Passthrough input dict for tfx.components.Trainer.executor.\n output_dict: Passthrough input dict for tfx.components.Trainer.executor.\n exec_properties: Passthrough input dict for tfx.components.Trainer.executor.\n executor_class_path: class path for TFX core default trainer.\n training_inputs: Training input argument for AI Platform training job.\n 'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred. For\n the full set of parameters, refer to\n https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput\n job_id: Job ID for AI Platform Training job. If not supplied,\n system-determined unique ID is given. 
Refer to\n https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job\n\n Returns:\n None\n Raises:\n RuntimeError: if the Google Cloud AI Platform training job failed.\n \"\"\"\n training_inputs = training_inputs.copy()\n\n json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)\n absl.logging.info('json_inputs=\\'{}\\'.'.format(json_inputs))\n json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)\n absl.logging.info('json_outputs=\\'{}\\'.'.format(json_outputs))\n json_exec_properties = json.dumps(exec_properties, sort_keys=True)\n absl.logging.info('json_exec_properties=\\'{}\\'.'.format(json_exec_properties))\n\n # Configure AI Platform training job\n api_client = discovery.build('ml', 'v1')\n\n # We use custom containers to launch training on AI Platform, which invokes\n # the specified image using the container's entrypoint. The default\n # entrypoint for TFX containers is to call scripts/run_executor.py. The\n # arguments below are passed to this run_executor entry to run the executor\n # specified in `executor_class_path`.\n job_args = [\n '--executor_class_path', executor_class_path, '--inputs', json_inputs,\n '--outputs', json_outputs, '--exec-properties', json_exec_properties\n ]\n\n if not training_inputs.get('masterConfig'):\n training_inputs['masterConfig'] = {\n 'imageUri': _TFX_IMAGE,\n }\n\n training_inputs['args'] = job_args\n\n # Pop project_id so AIP doesn't complain about an unexpected parameter.\n # It's been a stowaway in aip_args and has finally reached its destination.\n project = training_inputs.pop('project')\n project_id = 'projects/{}'.format(project)\n with telemetry_utils.scoped_labels(\n {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):\n job_labels = telemetry_utils.get_labels_dict()\n\n # 'tfx_YYYYmmddHHMMSS' is the default job ID if not explicitly specified.\n job_id = job_id or 'tfx_{}'.format(\n datetime.datetime.now().strftime('%Y%m%d%H%M%S'))\n job_spec = {\n 'jobId': job_id,\n 'trainingInput': training_inputs,\n 'labels': job_labels,\n }\n\n # Submit job to AIP Training\n absl.logging.info(\n 'Submitting job=\\'{}\\', project=\\'{}\\' to AI Platform.'.format(\n job_id, project))\n request = api_client.projects().jobs().create(\n body=job_spec, parent=project_id)\n request.execute()\n\n # Wait for AIP Training job to finish\n job_name = '{}/jobs/{}'.format(project_id, job_id)\n request = api_client.projects().jobs().get(name=job_name)\n response = request.execute()\n while response['state'] not in ('SUCCEEDED', 'FAILED'):\n time.sleep(_POLLING_INTERVAL_IN_SECONDS)\n response = request.execute()\n\n if response['state'] == 'FAILED':\n err_msg = 'Job \\'{}\\' did not succeed. Detailed response {}.'.format(\n job_name, response)\n absl.logging.error(err_msg)\n raise RuntimeError(err_msg)\n\n # AIP training complete\n absl.logging.info('Job \\'{}\\' successful.'.format(job_name))\n\n\ndef deploy_model_for_aip_prediction(\n serving_path: Text,\n model_version: Text,\n ai_platform_serving_args: Dict[Text, Any],\n executor_class_path: Text,\n):\n \"\"\"Deploys a model for serving with AI Platform.\n\n Args:\n serving_path: The path to the model. Must be a GCS URI.\n model_version: Version of the model being deployed. Must be different from\n what is currently being served.\n ai_platform_serving_args: Dictionary containing arguments for pushing to AI\n Platform. 
For the full set of parameters supported, refer to\n https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions#Version\n executor_class_path: class path for TFX core default trainer.\n\n Raises:\n RuntimeError: if an error is encountered when trying to push.\n \"\"\"\n absl.logging.info(\n 'Deploying to model with version {} to AI Platform for serving: {}'\n .format(model_version, ai_platform_serving_args))\n\n model_name = ai_platform_serving_args['model_name']\n project_id = ai_platform_serving_args['project_id']\n regions = ai_platform_serving_args.get('regions', [])\n default_runtime_version = _get_tf_runtime_version(tf.__version__)\n runtime_version = ai_platform_serving_args.get('runtime_version',\n default_runtime_version)\n python_version = _get_caip_python_version(runtime_version)\n\n api = discovery.build('ml', 'v1')\n body = {'name': model_name, 'regions': regions}\n parent = 'projects/{}'.format(project_id)\n try:\n api.projects().models().create(body=body, parent=parent).execute()\n except errors.HttpError as e:\n # If the error is to create an already existing model, it's ok to ignore.\n # TODO(b/135211463): Remove the disable once the pytype bug is fixed.\n if e.resp.status == 409: # pytype: disable=attribute-error\n absl.logging.warn('Model {} already exists'.format(model_name))\n else:\n raise RuntimeError('AI Platform Push failed: {}'.format(e))\n with telemetry_utils.scoped_labels(\n {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):\n job_labels = telemetry_utils.get_labels_dict()\n body = {\n 'name': model_version,\n 'deployment_uri': serving_path,\n 'runtime_version': runtime_version,\n 'python_version': python_version,\n 'labels': job_labels,\n }\n\n # Push to AIP, and record the operation name so we can poll for its state.\n model_name = 'projects/{}/models/{}'.format(project_id, model_name)\n response = api.projects().models().versions().create(\n body=body, parent=model_name).execute()\n op_name = response['name']\n\n deploy_status_resc = api.projects().operations().get(name=op_name)\n while not deploy_status_resc.execute().get('done'):\n time.sleep(_POLLING_INTERVAL_IN_SECONDS)\n absl.logging.info('Model still being deployed...')\n\n deploy_status = deploy_status_resc.execute()\n\n if deploy_status.get('error'):\n # The operation completed with an error.\n raise RuntimeError(\n 'Failed to deploy model to AI Platform for serving: {}'.format(\n deploy_status['error']))\n\n # Set the new version as default.\n # By API specification, if Long-Running-Operation is done and there is\n # no error, 'response' is guaranteed to exist.\n api.projects().models().versions().setDefault(\n name='{}/versions/{}'.format(model_name, deploy_status['response']\n ['name'])).execute()\n\n absl.logging.info(\n 'Successfully deployed model {} with version {}, serving from {}'.format(\n model_name, model_version, serving_path))\n","repo_name":"joonkimchi/tfx-async","sub_path":"tfx/extensions/google_cloud_ai_platform/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
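Both the training and the serving paths in the record above reduce to the same submit-then-poll loop against a long-running operation. A library-free sketch of that pattern; fetch_state, the state names, and the timeout are illustrative placeholders rather than the CAIP API:

import time

TERMINAL_STATES = {'SUCCEEDED', 'FAILED', 'CANCELLED'}

def wait_for_job(fetch_state, poll_interval=30, timeout=3600):
    """Call fetch_state() until it reports a terminal state or time runs out."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        state = fetch_state()
        if state in TERMINAL_STATES:
            if state != 'SUCCEEDED':
                raise RuntimeError('job ended in state %s' % state)
            return state
        time.sleep(poll_interval)   # cheap fixed-interval polling, as above
    raise TimeoutError('job still running after %s seconds' % timeout)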
+{"seq_id":"28096168296","text":"\ndef sort(l):\n \n for j in range(0,len(l)-1):\n minimum = j\n for i in range(j, len(l)):\n if(l[i] str:\n return str(path)\n\n\ndef convert_path(byt: bytes) -> Path:\n return Path(byt.decode())\n\n\nsqlite3.register_adapter(WindowsPath, adapt_path)\nsqlite3.register_adapter(PosixPath, adapt_path)\nsqlite3.register_converter(\"path\", convert_path)\n\n\ndef drop_tables(db: str) -> str:\n \"\"\"\n\n :param db:\n :return:\n \"\"\"\n with ExitStack() as stack:\n conn = stack.enter_context(DatabaseConnection(db))\n stack.enter_context(conn)\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS bonuses\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS tracks\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS bootlegdiscs\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS playeddiscs\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS rippeddiscs\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS discs\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS bootlegalbums\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS defaultalbums\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS digitalalbums\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS livealbums\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS albums\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS artists\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS genres\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS languages\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS supports\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS providers\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS countries\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS applications\")\n with suppress(sqlite3.OperationalError):\n conn.execute(\"DROP TABLE IF EXISTS repositories\")\n\n return db\n\n\ndef create_tables(db: str) -> str:\n \"\"\"\n\n :param db:\n :return:\n \"\"\"\n with ExitStack() as stack:\n conn = stack.enter_context(DatabaseConnection(db))\n stack.enter_context(conn)\n\n # --> Genres.\n conn.execute(\"CREATE TABLE IF NOT EXISTS genres (genreid INTEGER PRIMARY KEY ASC AUTOINCREMENT, genre TEXT NOT NULL)\")\n conn.executemany(\"INSERT INTO genres (genre) VALUES (?)\", [(item,) for item in GENRES])\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS genres_idx ON genres (genre ASC)\")\n\n # --> Languages.\n conn.execute(\"CREATE TABLE IF NOT EXISTS languages (languageid TEXT NOT NULL PRIMARY KEY, language TEXT NOT NULL)\")\n conn.executemany(\"INSERT INTO languages VALUES (?, ?)\", [('eng', 'English'), ('fra', 'French')])\n\n # --> Supports.\n conn.execute(\"CREATE TABLE IF NOT EXISTS supports (supportid INTEGER PRIMARY KEY ASC AUTOINCREMENT, support TEXT NOT NULL)\")\n conn.executemany(\"INSERT INTO supports (support) VALUES (?)\", [(\"CD\",), (\"Digital\",)])\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS supports_idx ON supports (support ASC)\")\n\n # --> Countries.\n conn.execute(\"CREATE TABLE IF NOT EXISTS countries 
(countryid TEXT NOT NULL PRIMARY KEY, country TEXT NOT NULL)\")\n conn.executemany(\"INSERT INTO countries VALUES (?, ?)\", [('AUS', 'Australia'),\n ('AUT', 'Austria'),\n ('BEL', 'Belgium'),\n ('BRA', 'Brazil'),\n ('CHE', 'Switzerland'),\n ('DEU', 'Germany'),\n ('ESP', 'Spain'),\n ('FIN', 'Finland'),\n ('FRA', 'France'),\n ('GBR', 'United Kingdom'),\n ('IRL', 'Ireland'),\n ('ITA', 'Italy'),\n ('MEX', 'Mexico'),\n ('NLD', 'Netherlands'),\n ('NOR', 'Norway'),\n ('NZL', 'New Zealand'),\n ('PRT', 'Portugal'),\n ('SWE', 'Sweden'),\n ('USA', 'United States'),\n ('ZAF', 'South Africa')])\n\n # --> Providers.\n conn.execute(\"CREATE TABLE IF NOT EXISTS providers (providerid INTEGER PRIMARY KEY ASC AUTOINCREMENT, provider TEXT NOT NULL)\")\n conn.executemany(\"INSERT INTO providers (provider) VALUES (?)\", [(\"nugs.net\",), (\"Crystal Cat Records\",), (\"Doberman\",), (\"The Godfatherecords\",), (\"HDtracks.com\",)])\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS providers_idx ON providers (provider ASC)\")\n\n # --> Applications.\n conn.execute(\"CREATE TABLE IF NOT EXISTS applications (applicationid INTEGER PRIMARY KEY ASC AUTOINCREMENT, application TEXT NOT NULL)\")\n conn.executemany(\"INSERT INTO applications (application) VALUES (?)\", [(\"dBpoweramp 15.1\",), (\"dBpoweramp Release 16.6\",)])\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS applications_idx ON applications (application ASC)\")\n\n # --> Repositories.\n conn.execute(\"CREATE TABLE IF NOT EXISTS repositories (repositoryid INTEGER PRIMARY KEY ASC AUTOINCREMENT, repository PATH NOT NULL)\")\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS repositories_idx ON repositories (repository ASC)\")\n conn.executemany(\"INSERT INTO repositories (repository) VALUES (?)\", [(Path(\"G:/\") / \"Music\" / \"Lossless1\",), (Path(\"G:/\") / \"Music\" / \"Lossless2\",)])\n\n # --> Artists.\n conn.execute(\"CREATE TABLE IF NOT EXISTS artists (artistsort TEXT NOT NULL PRIMARY KEY ASC, artist TEXT NOT NULL)\")\n\n # --> Albums.\n conn.execute(\"CREATE TABLE IF NOT EXISTS albums (\"\n \"albumid TEXT NOT NULL PRIMARY KEY ASC, \"\n \"artistsort TEXT NOT NULL REFERENCES artists (artistsort) ON DELETE CASCADE, \"\n \"discs INTEGER NOT NULL DEFAULT 1, \"\n \"genreid INTEGER REFERENCES genres (genreid) ON DELETE CASCADE DEFAULT 1, \"\n \"in_collection BOOLEAN NOT NULL DEFAULT 1, \"\n \"is_bootleg BOOLEAN NOT NULL DEFAULT 0, \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL)\")\n\n # --> Discs.\n conn.execute(\"CREATE TABLE IF NOT EXISTS discs (\"\n \"albumid TEXT NOT NULL, \"\n \"discid INTEGER NOT NULL, \"\n \"tracks INTEGER NOT NULL DEFAULT 10, \"\n \"is_live BOOLEAN NOT NULL DEFAULT 0, \"\n \"is_bonus BOOLEAN NOT NULL DEFAULT 0, \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid) REFERENCES albums (albumid) ON DELETE CASCADE)\")\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS discs_idx ON discs (albumid ASC, discid ASC)\")\n\n # --> Tracks.\n conn.execute(\"CREATE TABLE IF NOT EXISTS tracks (\"\n \"albumid TEXT NOT NULL, \"\n \"discid INTEGER NOT NULL, \"\n \"trackid INTEGER NOT NULL, \"\n \"title TEXT NOT NULL, \"\n \"is_live BOOLEAN NOT NULL DEFAULT 0, \"\n \"is_bonus BOOLEAN NOT NULL DEFAULT 0, \"\n \"languageid TEXT REFERENCES languages (languageid) ON DELETE CASCADE DEFAULT 'eng', \"\n \"supportid INTEGER REFERENCES supports (supportid) ON DELETE CASCADE DEFAULT 1, \"\n \"utc_created TIMESTAMP 
NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid, discid) REFERENCES discs (albumid, discid) ON DELETE CASCADE)\")\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS tracks_idx ON tracks (albumid ASC, discid ASC, trackid ASC)\")\n\n # --> Rippeddiscs.\n conn.execute(\"CREATE TABLE IF NOT EXISTS rippeddiscs (\"\n \"albumid TEXT NOT NULL, \"\n \"discid INTEGER NOT NULL, \"\n \"applicationid INTEGER REFERENCES applications (applicationid) ON DELETE CASCADE DEFAULT 1, \"\n \"utc_ripped TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid, discid) REFERENCES discs (albumid, discid) ON DELETE CASCADE)\")\n\n # --> Playeddiscs.\n conn.execute(\"CREATE TABLE IF NOT EXISTS playeddiscs (\"\n \"albumid TEXT NOT NULL, \"\n \"discid INTEGER NOT NULL, \"\n \"played INTEGER NOT NULL DEFAULT 0, \"\n \"utc_played TIMESTAMP DEFAULT NULL, \"\n \"utc_created TIMESTAMP NOT NULL, \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid, discid) REFERENCES discs (albumid, discid) ON DELETE CASCADE)\")\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS playeddiscs_idx ON playeddiscs (albumid ASC, discid ASC)\")\n\n # --> Bootlegdiscs.\n conn.execute(\"CREATE TABLE IF NOT EXISTS bootlegdiscs (\"\n \"albumid TEXT NOT NULL, \"\n \"discid INTEGER NOT NULL, \"\n \"bootlegid TEXT DEFAULT NULL, \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid, discid) REFERENCES discs (albumid, discid) ON DELETE CASCADE)\")\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS bootlegdiscs_idx ON bootlegdiscs (albumid ASC, discid ASC)\")\n\n # --> Defaultalbums.\n conn.execute(\n \"CREATE TABLE IF NOT EXISTS defaultalbums (\"\n \"albumid TEXT PRIMARY KEY ASC, \"\n \"origyear INTEGER NOT NULL, \"\n \"year INTEGER NOT NULL, \"\n \"album TEXT NOT NULL, \"\n \"label TEXT DEFAULT NULL, \"\n \"upc TEXT DEFAULT NULL, \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid) REFERENCES albums (albumid) ON DELETE CASCADE)\")\n\n # --> Bootlegalbums.\n conn.execute(\"CREATE TABLE IF NOT EXISTS bootlegalbums (\"\n \"albumid TEXT PRIMARY KEY ASC, \"\n \"providerid INTEGER REFERENCES providers (providerid) ON DELETE CASCADE DEFAULT 1, \"\n \"title TEXT DEFAULT NULL, \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid) REFERENCES albums (albumid) ON DELETE CASCADE)\")\n\n # --> Livealbums.\n conn.execute(\"CREATE TABLE IF NOT EXISTS livealbums (\"\n \"albumid TEXT PRIMARY KEY ASC, \"\n \"date DATE NOT NULL, \"\n \"city TEXT NOT NULL, \"\n \"tour TEXT NOT NULL, \"\n \"countryid TEXT REFERENCES countries (countryid) ON DELETE CASCADE DEFAULT 'USA', \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT (DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid) REFERENCES albums (albumid) ON DELETE CASCADE)\")\n\n # --> Bonuses.\n conn.execute(\"CREATE TABLE IF NOT EXISTS bonuses (\"\n \"albumid TEXT NOT NULL, \"\n \"discid INTEGER NOT NULL, \"\n \"trackid INTEGER NOT NULL, \"\n \"date DATE NOT NULL, \"\n \"city TEXT NOT NULL, \"\n \"tour TEXT NOT NULL, \"\n \"countryid TEXT REFERENCES countries (countryid) ON DELETE CASCADE DEFAULT 'USA', \"\n \"utc_created TIMESTAMP NOT NULL DEFAULT 
(DATETIME('now')), \"\n \"utc_modified TIMESTAMP DEFAULT NULL, \"\n \"FOREIGN KEY (albumid, discid, trackid) REFERENCES tracks (albumid, discid, trackid) ON DELETE CASCADE)\")\n conn.execute(\"CREATE UNIQUE INDEX IF NOT EXISTS bonuses_idx ON bonuses (albumid ASC, discid ASC, trackid ASC)\")\n\n return db\n\n\nif __name__ == \"__main__\":\n import argparse\n import os\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"database\")\n arguments = parser.parse_args()\n create_tables(drop_tables(os.path.abspath(arguments.database)))\n","repo_name":"cestMoiBaliBalo/first-project","sub_path":"MyPythonProject/Applications/Tables/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":14573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
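The register_adapter/register_converter calls at the top of tables.py are what let the repository PATH column round-trip pathlib objects, but only when the connection is opened with type detection. A minimal standalone demo against an in-memory database (no DatabaseConnection wrapper; the G:/Music path is just sample data):

import sqlite3
from pathlib import Path, PosixPath, WindowsPath

sqlite3.register_adapter(WindowsPath, str)                       # Path -> TEXT going in
sqlite3.register_adapter(PosixPath, str)
sqlite3.register_converter('path', lambda b: Path(b.decode()))   # TEXT -> Path coming out

conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute('CREATE TABLE repositories (repository PATH NOT NULL)')
conn.execute('INSERT INTO repositories VALUES (?)', (Path('G:/') / 'Music' / 'Lossless1',))
(repository,) = conn.execute('SELECT repository FROM repositories').fetchone()
print(type(repository).__name__, repository)   # e.g. PosixPath G:/Music/Lossless1

Without detect_types=sqlite3.PARSE_DECLTYPES the converter is never consulted and the SELECT hands back a plain str.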
+{"seq_id":"13871171141","text":"from user import *\n\n\nclass Receiver(Thread):\n def run(self):\n while 1:\n time.sleep(0.1)\n for group_id in groups_dict:\n while 1:\n (msg_type, msg) = grp_recv(group_id)\n if msg_type == GM_MSG_CODE:\n print(groups_dict[group_id], \": \", YELLOW, msg, ENDC)\n elif msg_type == USER_MSG_CODE:\n print(groups_dict[group_id], \": \", BLUE, msg, ENDC)\n else:\n break\n################################################################################\n\ngroups_no = int(input(\"Enter number of groups you want to enter: \"))\ngroups_dict = {}\nfor i in range(groups_no):\n gname = input(\"Name of \"+ str(i) + \"th group: \")\n g_id = join(gname, sys.argv[1])\n print(\"Group id: \", g_id)\n groups_dict[g_id] = gname\n\nif -1 in groups_dict:\n print(\"Error joining the groups!\")\n exit()\n\ninput(\"Press enter to start communication: \")\n\nreceiveThread = Receiver()\nreceiveThread.daemon = True\nreceiveThread.start()\n\ntry:\n while 1:\n try:\n grp_id = int(input(\"Group id: \"))\n except ValueError:\n print(\"not valid grp_id\")\n continue\n if grp_id not in groups_dict:\n print(grp_id, \"not in groups_dict.\")\n continue\n msg = input(\"Message: \")\n grp_send(grp_id, msg)\nexcept :\n print(\"!\")\n\nprint(\"Going to leave\")\nfor group_id in groups_dict:\n leave(group_id)\n","repo_name":"oaxelou/DistributedSystems","sub_path":"hw2/user_app.py","file_name":"user_app.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"24279709277","text":"\n__copyright__ = 'Copyright 2022, The RADICAL-Cybertools Team'\n__license__ = 'MIT'\n\n\n# configure the psij logger (captured in the launch components stderr)\nimport logging\nlogging.basicConfig(level='DEBUG')\n\nimport threading as mt\n\nfrom .base import PilotLauncherBase\nfrom ... import states as rps\n\n# psij is optional\npsij = None\npsij_ex = None\n\ntry:\n import psij\nexcept ImportError as ex:\n psij_ex = ex\n\n\n# ------------------------------------------------------------------------------\n#\nclass PilotLauncherPSIJ(PilotLauncherBase):\n\n # --------------------------------------------------------------------------\n #\n def __init__(self, name, log, prof, state_cb):\n\n\n # psij is an optional dependency - let an import exception fall through\n # to disable this pilot launcher\n if psij_ex:\n raise psij_ex\n\n assert psij\n\n PilotLauncherBase.__init__(self, name, log, prof, state_cb)\n\n self._jobs = dict() # map pilot id to psi_j job\n self._pilots = dict() # map psi_j id to pilot job\n self._jex = dict() # map launch schema to psi_j job executors\n self._lock = mt.RLock() # lock above structures\n\n\n # --------------------------------------------------------------------------\n #\n def _get_schema(self, rcfg):\n\n url = rcfg['job_manager_endpoint']\n schemas = url.split(':')[0].split('+')\n\n if 'ssh' in schemas:\n schemas.remove('ssh')\n\n if 'gsissh' in schemas:\n schemas.remove('gsissh')\n\n if len(schemas) > 1:\n return\n\n if not schemas:\n return\n\n schema = schemas[0]\n\n if schema == 'fork':\n schema = 'local'\n\n return schema\n\n\n # --------------------------------------------------------------------------\n #\n def _translate_state(self, status):\n\n if status.state == psij.JobState.NEW : return rps.NEW\n elif status.state == psij.JobState.QUEUED : return rps.PMGR_LAUNCHING\n elif status.state == psij.JobState.ACTIVE : return rps.PMGR_ACTIVE\n elif status.state == psij.JobState.COMPLETED : return rps.DONE\n elif status.state == psij.JobState.FAILED : return rps.FAILED\n elif status.state == psij.JobState.CANCELED : return rps.CANCELED\n else:\n raise ValueError('cannot interpret psij state: %s' % repr(status))\n\n\n # --------------------------------------------------------------------------\n #\n def _job_status_cb(self, job, status):\n\n try:\n with self._lock:\n\n if job.id not in self._pilots:\n return\n\n rp_state = self._translate_state(status)\n pilot = self._pilots[job.id]\n\n self._state_cb(pilot, rp_state)\n\n except Exception:\n self._log.exception('job status callback failed')\n\n\n # --------------------------------------------------------------------------\n #\n def can_launch(self, rcfg, pilot):\n\n schema = self._get_schema(rcfg)\n\n if not schema:\n return False\n\n if schema not in self._jex:\n\n self._log.debug('create executor for %s', schema)\n try:\n self._jex[schema] = psij.JobExecutor.get_instance(schema)\n self._jex[schema].set_job_status_callback(self._job_status_cb)\n except:\n self._log.exception('failed to create psij executor')\n return False\n\n return True\n\n\n # --------------------------------------------------------------------------\n #\n def launch_pilots(self, rcfg, pilots):\n\n assert psij\n\n for pilot in pilots:\n\n pid = pilot['uid']\n schema = self._get_schema(rcfg)\n assert schema\n\n jex = self._jex.get(schema)\n\n assert jex\n\n jd = pilot['jd_dict']\n\n proj, res = None, None\n if jd.project:\n if ':' in jd.project:\n proj, res = jd.project.split(':', 1)\n else:\n proj = 
jd.project\n\n attr = psij.JobAttributes()\n attr.duration = jd.wall_time_limit\n attr.queue_name = jd.queue\n attr.project_name = proj\n attr.reservation_id = res\n\n spec = psij.JobSpec()\n spec.attributes = attr\n spec.executable = jd.executable\n spec.arguments = jd.arguments\n spec.environment = jd.environment\n spec.directory = jd.working_directory\n spec.stdout_path = jd.output\n spec.stderr_path = jd.error\n\n spec.resources = psij.ResourceSpecV1()\n spec.resources.node_count = jd.node_count\n spec.resources.process_count = jd.total_cpu_count\n # spec.resources.cpu_cores_per_process = 1\n # spec.resources.gpu_cores_per_process = jd.total_gpu_count\n\n job = psij.Job(spec)\n\n self._jobs[pid] = job\n self._pilots[job.id] = pilot\n self._log.debug('added %s: %s', job.id, pid)\n\n jex.submit(job)\n\n\n # --------------------------------------------------------------------------\n #\n def kill_pilots(self, pids):\n\n for pid in pids:\n if pid not in pids:\n continue\n\n self._jobs[pid].cancel()\n\n\n# ------------------------------------------------------------------------------\n\n","repo_name":"radical-cybertools/radical.pilot","sub_path":"src/radical/pilot/pmgr/launching/psi_j.py","file_name":"psi_j.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"}
+{"seq_id":"40079620566","text":"from explosion import *\n\nfrom engine_files.entity import *\nclass Bullet(MovableEntity):\n def __init__(self, pos, game_map, speed, angle, damage, explosion_size, owner):\n super().__init__((pos[0] - 10, pos[1] - 5, 10, 10), game_map, speed)\n self.angle = angle\n self.img = self.img = get_transparent_surface(pygame.image.load('./assets/bullet.png'),(20, 10))\n\n self.x_shift = math.cos(math.radians(self.angle))*self.speed\n self.y_shift = math.sin(math.radians(self.angle))*self.speed\n self.explosion_size = explosion_size\n self.explosion_damage = 2\n\n self.owner = owner\n # whether the bullet can hit its owner\n self.self_damage = False\n self.damage = damage\n self.block_damage = self.damage\n self.collide_entities = []\n\n def bound_collide(self, bound):\n '''\n Explodes the bullet.\n Is called if a bound is colliding with a bullet.\n :param bound: The bound that the bullet is colliding with.\n :return: None\n '''\n if(not self.active):\n return\n self.explode()\n self.active = False\n\n def wall_collide(self, wall):\n '''\n Explodes the bullet.\n Is called if a wall is colliding with a bullet.\n :param wall: The wall that the bullet is colliding with.\n :return: None\n '''\n if(not self.active):\n return\n self.explode()\n self.active = False\n if (wall not in self.collide_entities):\n self.collide_entities.append(wall)\n\n def block_collide(self, block):\n '''\n Explodes the bullet.\n Is called if a block is colliding with a bullet.\n :param block: The bound that the bullet is colliding with.\n :return: None\n '''\n if(not self.active):\n return\n self.explode()\n self.active = False\n\n if (block not in self.collide_entities):\n block.entity_collide(self)\n self.collide_entities.append(block)\n\n def player_collide(self, player):\n '''\n Explodes the bullet.\n Is called if a player is colliding with a bullet.\n :param player: The bound that the bullet is colliding with.\n :return: None\n '''\n if(not self.active):\n return\n self.explode()\n self.active = False\n if (not self.self_damage):\n if (player == self.owner):\n return\n if(player not in self.collide_entities):\n player.take_damage(self.damage, self)\n self.collide_entities.append(player)\n\n def explode(self):\n '''\n Creates an explosion object in the center of the bullet object.\n :return: None\n '''\n if(self.active == True):\n pos = self.explosion_size / 2\n explosion = Explosion((self.centerx - pos, self.centery - pos),\n self.explosion_damage, self.explosion_size, self.game_map, 10, self.owner)\n explosion.self_damage = False\n self.game_map.entity_lst.append(explosion)\n\n def update(self, *args, **kwargs):\n '''\n Updates the bullet position.\n :param args:\n :param kwargs:\n :return: None\n '''\n if(not self.active):\n self.game_map.bullet_lst.remove(self)\n self.move_x = self.x_shift\n self.move_y = self.y_shift\n super().update(args, kwargs)\n\n def draw(self, surface):\n '''\n Draws bullets on surface.\n Bullets will be rotated if shot at different angles.\n :param surface: The surface the bullet will be drawn on.\n :return: None\n '''\n if(not self.active):\n return\n rotated_img, rotated_rect = centered_rotate(self.img, self, -self.angle)\n surface.blit(rotated_img, rotated_rect)\n\nclass Short_Bullet(Bullet):\n def __init__(self, pos, game_map, speed, angle, damage, explosion_size, owner, bullet_timer):\n super().__init__(pos, game_map, speed, angle, damage, explosion_size, owner)\n self.bullet_timer = bullet_timer\n self.img = self.img = 
get_transparent_surface(pygame.image.load('./assets/short_bullet.png'), (10, 10))\n\n def bullet_despawn(self):\n '''\n Explodes the bullet.\n Is called if the bullet timer hits 0.\n :return: None\n '''\n self.explode()\n self.active = False\n\n def update(self, *args, **kwargs):\n '''\n Updates the bullet timer.\n :param args:\n :param kwargs:\n :return: None\n '''\n self.bullet_timer = self.bullet_timer - 1\n if (self.bullet_timer <= 0):\n self.bullet_despawn()\n super().update(args, kwargs)\n\n def draw(self, surface):\n '''\n Draws short bullets on surface.\n Short bullets will be rotated if shot at different angles.\n :param surface: The surface the bullet will be drawn on.\n :return: None\n '''\n if (not self.active):\n return\n rotated_img, rotated_rect = centered_rotate(self.img, self, -self.angle)\n surface.blit(rotated_img, rotated_rect)\n\nclass Ranger_Bullet(Bullet):\n def __init__(self, pos, game_map, speed, angle, damage, explosion_size, owner, bullet_scale_timer, bullet_scale_amount, bullet_scale_cap):\n super().__init__(pos, game_map, speed, angle, damage, explosion_size, owner)\n self.bullet_scale_timer = bullet_scale_timer\n self.bullet_scale_amount = bullet_scale_amount\n self.bullet_scale_cap = bullet_scale_cap\n self.bullet_scale_time = bullet_scale_timer\n self.damage = damage\n self.explosion_size = explosion_size\n self.img = self.img = get_transparent_surface(pygame.image.load('./assets/ranger_bullet.png'), (20, 10))\n\n def update(self, *args, **kwargs):\n self.bullet_scale_timer = self.bullet_scale_timer - 1\n if (self.bullet_scale_timer <= 0 and self.damage < self.bullet_scale_cap):\n self.damage = self.damage + self.bullet_scale_amount\n self.explosion_size = self.explosion_size + self.bullet_scale_amount\n self.bullet_scale_timer = self.bullet_scale_time\n print(self.damage)\n super().update(args, kwargs)\n\n def draw(self, surface):\n if (not self.active):\n return\n rotated_img, rotated_rect = centered_rotate(self.img, self, -self.angle)\n surface.blit(rotated_img, rotated_rect)\n\nclass Homing_Bullet(Bullet):\n def __init__(self, pos, game_map, speed, angle, damage, explosion_size, owner, target, max_turn, turn_timer):\n super().__init__(pos, game_map, speed, angle, damage, explosion_size, owner)\n # self.speed = speed\n self.max_turn = max_turn\n self.turn_timer = turn_timer\n self.turn_time = self.turn_timer\n # self.speed_timer = 10\n self.target = target\n self.img = self.img = get_transparent_surface(pygame.image.load('./assets/homing_bullet.png'), (20, 10))\n\n def update(self, *args, **kwargs):\n new_angle = get_point_angle(self.center, self.target.center, False)\n angle_dif = new_angle - self.angle\n # print(str(new_angle)+' '+str(self.angle)+' '+str(angle_dif))\n if (angle_dif <-180):\n angle_dif = angle_dif+360\n if (angle_dif > self.max_turn):\n angle_dif = self.max_turn\n if (angle_dif < -self.max_turn):\n angle_dif = -self.max_turn\n self.angle = self.angle + angle_dif\n self.x_shift = math.cos(math.radians(self.angle)) * self.speed\n self.y_shift = math.sin(math.radians(self.angle)) * self.speed\n self.turn_timer = self.turn_timer - 1\n if(self.turn_timer <= 0 and self.max_turn != 0):\n self.max_turn = self.max_turn - 1\n self.turn_time = self.turn_time - 1\n self.turn_timer = self.turn_time\n # self.speed_timer = self.speed_timer -1\n # if(self.speed_timer <= 0 and self.speed > 3):\n # self.speed = self.speed - 0.2\n # self.speed_timer = 10\n # print(self.angle)\n super().update(args, kwargs)\n\n def draw(self, surface):\n if (not 
self.active):\n return\n rotated_img, rotated_rect = centered_rotate(self.img, self, -self.angle)\n surface.blit(rotated_img, rotated_rect)\n\nclass Explode_Bullet(Bullet):\n def __init__(self, pos, game_map, speed, angle, damage, explosion_size, owner, weapon):\n super().__init__(pos, game_map, speed, angle, damage, explosion_size, owner)\n self.weapon = weapon\n self.img = self.img = get_transparent_surface(pygame.image.load('./assets/explode_bullet.png'), (20, 10))\n self.explosion_damage = 8\n\n def bullet_explode(self):\n self.explode()\n self.active = False\n\n def explode(self):\n if (self.active == True):\n pos = self.explosion_size / 2\n explosion = Mine_Explosion((self.centerx - pos, self.centery - pos),\n self.explosion_damage, self.explosion_size, self.game_map, 10, self.owner)\n explosion.self_damage = False\n explosion.damage = self.explosion_damage\n self.game_map.entity_lst.append(explosion)\n self.weapon.last_bullet = None\n\n def draw(self, surface):\n if (not self.active):\n return\n rotated_img, rotated_rect = centered_rotate(self.img, self, -self.angle)\n surface.blit(rotated_img, rotated_rect)\n\nclass Target_Bullet(Homing_Bullet):\n def __init__(self, pos, game_map, speed, angle, damage, explosion_size, owner, target, max_turn, turn_timer, bullet_timer):\n super().__init__(pos, game_map, speed, angle, damage, explosion_size, owner, target, max_turn, turn_timer)\n self.img = self.img = get_transparent_surface(pygame.image.load('./assets/homing_bullet.png'), (20, 10))\n self.bullet_timer = bullet_timer\n\n def bullet_despawn(self):\n '''\n Explodes the bullet.\n Is called if the bullet timer hits 0.\n :return: None\n '''\n self.explode()\n self.active = False\n\n def update(self, *args, **kwargs):\n self.bullet_timer = self.bullet_timer - 1\n if (self.bullet_timer <= 0):\n self.bullet_despawn()\n # if(not self.active):\n # self.owner.cannon.has_bullet = False\n super().update(args, kwargs)\n","repo_name":"ningning-1234/TankGame","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":10486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
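Every bullet above turns a heading in degrees into a per-frame velocity, and the homing variant additionally clamps how far the heading may rotate each frame. The steering math in isolation; this is a standalone sketch, and the symmetric modulo normalisation is one common way to get the shortest signed turn (slightly more general than the single-sided -180 check in Homing_Bullet):

import math

def velocity(angle_deg, speed):
    # Per-frame displacement along a heading given in degrees.
    return (math.cos(math.radians(angle_deg)) * speed,
            math.sin(math.radians(angle_deg)) * speed)

def steer(current_deg, target_deg, max_turn):
    # Shortest signed difference, normalised into (-180, 180] ...
    diff = (target_deg - current_deg + 180) % 360 - 180
    # ... then clamped so the projectile can only turn so fast per frame.
    diff = max(-max_turn, min(max_turn, diff))
    return current_deg + diff

print(velocity(0, 5))       # (5.0, 0.0)
print(steer(10, 350, 15))   # -5: turned the allowed 15 of the needed -20 degrees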
+{"seq_id":"22731781160","text":"from utils import timer\n\nclass Solution:\n\n # Solution 1: Greedy: find peak & valley\n # @timer\n # def maxProfit(self, prices):\n # profit, buy = 0, None\n\n # for i in range(len(prices)):\n # if i == len(prices) - 1:\n # if buy is not None: profit += (prices[i] - buy)\n # elif prices[i] > prices[i+1] and buy is not None:\n # profit += (prices[i] - buy) ; buy = None\n # elif prices[i] < prices[i+1] and buy is None:\n # buy = prices[i]\n\n # return profit\n\n # Solution 2: Greedy: every greedy\n @timer\n def maxProfit(self, prices):\n profit = 0\n for i in range(len(prices)-1):\n slope = prices[i+1] - prices[i]\n if slope > 0: profit += slope\n \n return profit\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.maxProfit([7,1,5,3,6,4]))\n print(sol.maxProfit([1,2,3,4,5]))\n print(sol.maxProfit([7,6,4,3,1]))","repo_name":"melissakou/leetcode_practice","sub_path":"solutions/0122_Best_Time_to_Buy_and_Sell_Stock_II.py","file_name":"0122_Best_Time_to_Buy_and_Sell_Stock_II.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"71088213512","text":"import pandas\r\nimport turtle\r\n\r\n\r\nclass state:\r\n def __init__(self):\r\n self.read_file()\r\n self.score=0\r\n\r\n def read_file(self):\r\n self.data=pandas.read_csv(\"50_states.csv\")\r\n self.name_state=self.data[\"state\"].to_list()\r\n\r\n def check_answer(self,user_answer):\r\n for correct_answer in self.name_state:\r\n if correct_answer==user_answer:\r\n self.score+=1\r\n else:\r\n self.dict_lear=[]\r\n self.dict_lear.append(user_answer)\r\n data =pandas.DataFrame( self.dict_lear)\r\n data.to_csv(\"learn.csv\")\r\n\r\n def position_state(self,user_answer):\r\n self.post_state=self.data[self.data.state==user_answer]\r\n self.x_pos=int(self.post_state[\"x\"])\r\n self.y_pos=int(self.post_state[\"y\"])\r\n turtle.penup()\r\n turtle.hideturtle()\r\n turtle.goto(x=self.x_pos,y=self.y_pos)\r\n turtle.write(arg=f\"{user_answer}\",font=(\"Arial\",10,\"normal\"))\r\n\r\n\r\n\r\n\r\n","repo_name":"JakAnushka/us_state_game","sub_path":"state_access.py","file_name":"state_access.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"29761628100","text":"'''\nCreated on Oct 21, 2022\n\n@author: estudiante\n'''\npeso=float(input(\"Cuanto pesas?: \"))\n\nwhile peso>0:\n edad=int(input(\"Cuantos anos tienes?: \"))\n vida=input(\"Que estilo de vida llevas? (Sedentaria, Activa or muy activa): \").upper()\n while (vida!=\"SEDENTARIA\" and vida!=\"ACTIVA\" and vida!=\"MUY ACTIVA\") or (peso<10 or peso>180) or (edad<0 or edad>110):\n if vida!=\"SEDENTARIA\" and vida!=\"ACTIVA\" and vida!=\"MUY ACTIVA\":\n print(\"Datos Erroneos\")\n vida=input(\"Que estilo de vida llevas? (Sedentaria, Activa or muy activa): \").upper() \n if peso<10 or peso>180:\n print(\"Datos Erroneos\")\n peso=float(input(\"Cuanto pesas?: \"))\n if edad<0 or edad>110:\n print(\"Datos Erroneos\")\n edad=int(input(\"Cuantos anos tienes?: \"))\n \n if (edad>70 and vida==\"SEDENTARIA\") or (peso>100) or (peso>74.4 and edad)>50:\n print(\"Le recomendamos ir al medico\")\n else:\n print(\"No es urgente que acuda al médico si no tiene problemas de salud\")\n peso=float(input(\"Cuanto pesas?: \"))\n \nprint(\"El programa ha terminado.\")\n \n \n \n ","repo_name":"RafaMoreno10/Programacion","sub_path":"PruebasIndividuales/Prueba_21oct.py","file_name":"Prueba_21oct.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"14072130468","text":"from random import randint\r\nimport turtle\r\n\r\nclass Point:\r\n\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n def falls_in_rectangle(self, rectangle):\r\n if rectangle.point1.x < self.x < rectangle.point2.x and \\\r\n rectangle.point1.y < self.y < rectangle.point2.y:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nclass Rectangle:\r\n\r\n def __init__(self, point1, point2):\r\n self.point1 = point1\r\n self.point2 = point2\r\n\r\n def area (self):\r\n return (self.point2.x - self.point1.x) * (self.point2.y - self.point1.y)\r\n\r\nclass GenerateRectangle(Rectangle):\r\n\r\n def draw(self, canvas):\r\n\r\n # Go to a certain coordinate\r\n canvas.penup()\r\n canvas.goto(self.point1.x, self.point1.y)\r\n\r\n canvas.pendown()\r\n canvas.forward(self.point2.x - self.point1.x)\r\n canvas.left(90)\r\n canvas.forward(self.point2.y - self.point1.y)\r\n canvas.right(90)\r\n canvas.forward(self.point1.x - self.point2.x)\r\n canvas.left(90)\r\n canvas.forward(self.point1.y - self.point2.y)\r\n\r\nclass GeneratePoint(Point):\r\n\r\n def draw(self, canvas):\r\n canvas.penup()\r\n canvas.goto(self.x, self.y)\r\n canvas.pendown()\r\n canvas.dot(5, \"green\")\r\n\r\n\r\nrectangle = GenerateRectangle( Point(randint(0,200), randint(0,200)),\\\r\n Point(randint(200,400), randint(200,400)))\r\n\r\nprint(\"The rectangle coordinates are:\", rectangle.point1.x, \", \", rectangle.point1.y, \"and\", rectangle.point2.x, \", \", rectangle.point2.y)\r\n\r\nuser_point = GeneratePoint(float(input(\"Your X value is: \")), float(input(\"Your Y value is: \")))\r\n\r\nuser_area = float(input(\"Which is the rectangle area? \"))\r\n\r\nprint(\"The point you chose falls in the rectangle? \", user_point.falls_in_rectangle(rectangle))\r\n\r\nprint(\"Your answer was: \")\r\nif rectangle.area() == user_area:\r\n print(\"Correct\")\r\nelse:\r\n print(\"Incorrect\")\r\n\r\nmy_turtle = turtle.Turtle()\r\nrectangle.draw(canvas=my_turtle)\r\nuser_point.draw(canvas=my_turtle)\r\n\r\nturtle.done()","repo_name":"cipriannegotei/Data-Analysis","sub_path":"Py Portfolio/Geometry-Game/Geometry.py","file_name":"Geometry.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"74377096071","text":"#!/usr/bin/env python3\n\nimport sys\n\nif len(sys.argv) < 2:\n exit(1)\n\ndef getneighbours(position):\n neighboursd = (\n (1,0,0),\n (-1,0,0),\n (0,1,0),\n (0,-1,0),\n (0,0,1),\n (0,0,-1)\n )\n return {\n tuple(map(sum, zip(position, dt)))\n for dt in neighboursd\n }\n\ncubes = set()\nconnections = 0\nwith open(sys.argv[1]) as f:\n for line in f:\n newcube = tuple(\n map(\n int,\n line.strip().split(',')\n )\n )\n neighbours = getneighbours(newcube)\n connections += len(cubes & neighbours)\n cubes.add(newcube)\n\nprint(6 * len(cubes) - 2 * connections) # part 1\n\nminx = min(map(lambda t: t[0], cubes))\nminy = min(map(lambda t: t[1], cubes))\nminz = min(map(lambda t: t[2], cubes))\nmaxx = max(map(lambda t: t[0], cubes))\nmaxy = max(map(lambda t: t[1], cubes))\nmaxz = max(map(lambda t: t[2], cubes))\n\n# We build a cuboid larger on all sides from the droplet and fill it, as if with\n# water\ncminx = minx - 1\ncminy = miny - 1\ncminz = minz - 1\ncmaxx = maxx + 1\ncmaxy = maxy + 1\ncmaxz = maxz + 1\n\nwaters = set()\nfaces_exposed_to_water = 0\nstartwater = (cminx, cminy, cminz)\ntofill = { startwater }\nwhile tofill:\n water = tofill.pop()\n waters.add(water)\n\n neighbours = getneighbours(water)\n\n incuboidneighbours = {\n n\n for n in neighbours\n if (cminx <= n[0] <= cmaxx\n and cminy <= n[1] <= cmaxy\n and cminz <= n[2] <= cmaxz)\n }\n nonwaterneighbours = incuboidneighbours - waters\n\n faces_exposed_to_water += len(nonwaterneighbours & cubes)\n airneighbours = nonwaterneighbours - cubes\n tofill.update(airneighbours)\n\nprint(faces_exposed_to_water) # part 2\n","repo_name":"cigix/adventofcode2022","sub_path":"day18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"23440838073","text":"from flask import Blueprint, jsonify\nimport csv\n\ncityData = Blueprint(\"cityData\", __name__)\n\n@cityData.route(\"/get/data/\")\ndef get_city_data(city):\n city_data = {}\n with open(\"./assets/city.csv\", \"r\") as file:\n reader = csv.DictReader(file)\n for row in reader:\n if row[\"City\"].lower() == city.lower():\n city_data = {'city':row[\"City\"],\n 'population_density':row['Population_Density'],\n 'unemployment_rate':row[\"Unemployment_Rate\"],\n 'clean_water_availability':row['Clean_Water_Availability'],\n 'Air_Quality_Index':row[\"Air_Quality_Index\"],\n 'literacy_rate':f'{row[\"Literacy_Rate\"]}',\n 'crime_rate':row[\"Crime_Rate\"],\n 'Average_Temperature_Rate':row[\"Average_Temperature_Rate\"],\n \n }\n return city_data\n return jsonify(\"city not in db\")","repo_name":"krishna1804g/crud_functionalities","sub_path":"main/cityData.py","file_name":"cityData.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"46272926004","text":"from collections import defaultdict\nfrom dataclasses import dataclass\nimport enum\nfrom typing import List, Dict, Optional\n\nimport toloka.client as toloka\n\nfrom .. import evaluation, mapping, worker as workers\n\n\nclass SolutionVerdict(enum.Enum):\n BAD = 0\n UNKNOWN = 1\n OK = 2\n\n\n@dataclass\nclass Solution:\n solution: mapping.Objects\n verdict: SolutionVerdict\n evaluation: Optional[evaluation.SolutionEvaluation]\n assignment_accuracy: float # ratio of correctly completed tasks in source assignment\n assignment_evaluation_recall: float # ratio of evaluated tasks in source assignment\n worker: workers.Worker\n\n def __lt__(self, other: 'Solution') -> bool:\n if self.verdict.value != other.verdict.value:\n return self.verdict.value < other.verdict.value\n\n assert (\n self.evaluation is None\n and other.evaluation is None\n or self.evaluation is not None\n and other.evaluation is not None\n )\n\n if self.evaluation is not None and self.evaluation.confidence != other.evaluation.confidence:\n return self.evaluation.confidence < other.evaluation.confidence\n\n def f1(p, r):\n return 2 * (p * r) / (p + r)\n\n return f1(self.assignment_accuracy, self.assignment_evaluation_recall) < f1(\n other.assignment_accuracy, other.assignment_evaluation_recall\n )\n\n\nResults = List[List[Solution]]\n\n\ndef get_results(\n pool_input_objects: List[mapping.Objects],\n markup_assignments: List[mapping.AssignmentSolutions],\n solution_id_to_evaluation: Dict[mapping.TaskID, evaluation.SolutionEvaluation],\n markup_task_mapping: mapping.TaskMapping,\n check_task_mapping: mapping.TaskMapping,\n) -> Results:\n assert all(\n assignment.status in (toloka.Assignment.ACCEPTED, toloka.Assignment.REJECTED)\n for assignment, _ in markup_assignments\n )\n\n assignment_accuracy_evaluation_strategy = evaluation.CheckAssignmentAccuracyEvaluationStrategy(\n solution_id_to_evaluation, check_task_mapping\n )\n assignments_evaluations = [\n assignment_accuracy_evaluation_strategy.evaluate_assignment(assignment) for assignment in markup_assignments\n ]\n assignment_id_to_accuracy = {e.assignment.id: e.get_accuracy() for e in assignments_evaluations}\n assignment_id_to_evaluation_recall = {e.assignment.id: e.get_evaluation_recall() for e in assignments_evaluations}\n\n task_id_to_result = defaultdict(list)\n for assignment, solutions in markup_assignments:\n for input_objects, output_objects in solutions:\n task_id = markup_task_mapping.task_id(input_objects)\n solution_id = check_task_mapping.task_id(input_objects + output_objects)\n solution_evaluation = solution_id_to_evaluation.get(solution_id)\n verdict = SolutionVerdict.UNKNOWN\n if solution_evaluation:\n verdict = SolutionVerdict.OK if solution_evaluation.ok else SolutionVerdict.BAD\n solution = Solution(\n solution=output_objects,\n worker=workers.Human(assignment),\n verdict=verdict,\n evaluation=solution_evaluation,\n assignment_accuracy=assignment_id_to_accuracy[assignment.id],\n assignment_evaluation_recall=assignment_id_to_evaluation_recall[assignment.id],\n )\n task_id_to_result[task_id].append(solution)\n\n for solutions in task_id_to_result.values():\n solutions.sort(reverse=True)\n\n # order results in pool input order\n results = []\n for input_objects in pool_input_objects:\n results.append(task_id_to_result[markup_task_mapping.task_id(input_objects)])\n\n return 
results\n","repo_name":"lambdazy/crowdom","sub_path":"src/feedback_loop/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"27"}
+{"seq_id":"24279421447","text":"#!/usr/bin/env python3\n\n__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'\n__license__ = 'MIT'\n\nimport os\nimport sys\nimport random\n\nimport radical.pilot as rp\nimport radical.utils as ru\n\npwd = os.path.abspath(os.path.dirname(__file__))\n\n\n# ------------------------------------------------------------------------------\n#\nif __name__ == '__main__':\n\n report = ru.Reporter(name='radical.pilot')\n report.title('Getting Started (RP version %s)' % rp.version)\n\n # use the resource specified as argument, fall back to localhost\n if len(sys.argv) > 1: resource = sys.argv[1]\n else : resource = 'local.localhost'\n\n session = rp.Session()\n\n try:\n # read the config used for resource details\n report.info('read config')\n config = ru.read_json('%s/../config.json' % pwd)\n report.ok('>>ok\\n')\n\n report.header('submit pilots')\n\n pd_init = {'resource' : resource,\n 'runtime' : 60, # pilot runtime (min)\n 'exit_on_error' : True,\n 'cores' : 32\n }\n pdesc = rp.PilotDescription(pd_init)\n pmgr = rp.PilotManager(session=session)\n pilot = pmgr.submit_pilots(pdesc)\n\n report.header('submit bags of tasks')\n\n tmgr = rp.TaskManager(session=session)\n tmgr.add_pilots(pilot)\n\n # run N bags of tasks, where each bag contains M tasks of different\n # sizes. All tasks within the same bag are to get scheduled on the\n # same node (colocated)\n\n n_bags = 2\n bag_size = 3\n task_size = [5, 1, 4]\n\n assert len(task_size) == bag_size\n\n tds = list()\n for b in range(n_bags):\n for tid,s in enumerate(task_size):\n td = rp.TaskDescription()\n td.executable = '%s/colocated_task.sh' % pwd\n td.arguments = [b, bag_size, tid]\n td.ranks = s\n td.tags = {'colocate': {'bag' : b, 'size': bag_size}}\n td.name = 'b%03d-t%03d' % (b, tid)\n print(td.name)\n tds.append(td)\n report.progress()\n\n random.shuffle(tds)\n\n tmgr.submit_tasks(tds)\n\n report.header('gather results')\n tmgr.wait_tasks()\n\n\n except Exception as e:\n # Something unexpected happened in the pilot code above\n report.error('caught Exception: %s\\n' % e)\n ru.print_exception_trace()\n raise\n\n except (KeyboardInterrupt, SystemExit) as e:\n # the callback called sys.exit(), and we can here catch the\n # corresponding KeyboardInterrupt exception for shutdown. We also catch\n # SystemExit (which gets raised if the main threads exits for some other\n # reason).\n ru.print_exception_trace()\n report.warn('exit requested with %s\\n' % e)\n\n finally:\n # always clean up the session, no matter if we caught an exception or\n # not. This will kill all remaining pilots.\n report.header('finalize')\n session.close(download=True)\n\n report.header()\n\n\n# ------------------------------------------------------------------------------\n\n","repo_name":"radical-cybertools/radical.pilot","sub_path":"examples/misc/colocated.py","file_name":"colocated.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"27"}
+{"seq_id":"9179367477","text":"from pymongo import MongoClient\nfrom os import path, listdir\nimport bz2\nimport json\n\ndata_path = '/media/luungoc2005/Data/Projects/Data/2015_reddit_comments_corpus/reddit_data'\n\nclient = MongoClient('localhost', 27017)\n\ndb = client['reddit_texts']\nall_collection = db['all']\n\nsub_dirs = [\n path.join(data_path, item) for item in listdir(data_path) \n if path.isdir(path.join(data_path, item))\n]\n\n\n# take the first 3 years?\nsub_dirs = sub_dirs[:3]\n\nfor sub_dir in sub_dirs:\n print(sub_dir)\n sub_files = [\n path.join(sub_dir, item) for item in listdir(sub_dir) \n if item.endswith('.bz2')\n ]\n for sub_file in sub_files:\n print('Processing ' + sub_file)\n with bz2.open(sub_file, 'r') as f:\n # file contents:\n contents = [json.loads(line) for line in f.readlines()]\n all_collection.insert_many(contents, ordered=False)","repo_name":"luungoc2005/transformer-chatbot-test","sub_path":"data_reddit/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"32422828771","text":"from typing import List\nfrom collections import defaultdict\nfrom itertools import accumulate\n\nclass Solution:\n def numSubvectorSumTarget(self, nums: List[int], target: int) -> int:\n # solver over 1D, O(N)\n d, count = defaultdict(lambda: 0), 0\n # prefix sum over nums\n d[0] = 1\n for x in accumulate(nums):\n count += d[x - target]\n d[x] += 1\n return count\n def numSubmatrixSumTarget(self, matrix: List[List[int]], target: int) -> int:\n # O(min((R^2)*C, R*(C^2)))\n m, n = len(matrix), len(matrix[0])\n if m > n:\n m, n, matrix = n, m, list(map(list, zip(*matrix)))\n # prefix sum over cols\n for i in range(1, m):\n for j in range(n):\n matrix[i][j] += matrix[i - 1][j]\n # count\n count = 0\n # 1D over each rows\n for i in range(m):\n count += self.numSubvectorSumTarget(matrix[i], target)\n # 1D over each pair of rows\n for j in range(m):\n for i in range(j):\n count += self.numSubvectorSumTarget([y - x for x, y in zip(matrix[i], matrix[j])], target)\n return count\n\nif __name__ == '__main__':\n solver = Solution()\n cases = [\n ([[1,-1],[-1,1]], 0),\n ([[0,1,0],[1,1,1],[0,1,0]], 0),\n ([[0,0,0,1,1],[1,1,1,1,1],[0,1,0,0,0],[0,1,0,0,0],[1,1,1,1,0],[1,1,1,0,1]], 0),\n ]\n rslts = [solver.numSubmatrixSumTarget(matrix, target) for matrix, target in cases]\n for cs, rs in zip(cases, rslts):\n print(f\"case: {cs} | solution: {rs}\")\n","repo_name":"gyang274/leetcode","sub_path":"src/1000-1099/1074.num.submatrix.sum.target.py","file_name":"1074.num.submatrix.sum.target.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"16445361445","text":"from helpers import * #@UnresolvedImport\nimport cv2\nimport dlib#@UnresolvedImport\nimport time\nimport sys\n\nclass tracking:\n # 定数定義\n ESC_KEY = 27 # Escキー\n INTERVAL= 33 # 待ち時間\n FRAME_RATE = 30 # fps\n\n DEVICE_ID = 0\n\n # 分類器の初期設定\n face_cascade, eye_cascade, mouth_cascade, nose_cascade = index_cascade()\n\n # ウィンドウの準備\n cv2.namedWindow(\"tracking\")\n\n\n # 画像の取得と顔の検出\n img = cv2.imread(\"/Users/yokouchiryouta/Desktop/顔データ/検証用画像データ/顔領域/Tsuboi_1.jpg\")\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # 輪郭、目、鼻、口を画像上から探す\n face_list, eye_list, mouth_list, nose_list, recognition_flag = find_face_parts(img_gray, face_cascade, eye_cascade, mouth_cascade, nose_cascade)\n if not recognition_flag:\n print(\"顔検出に失敗しました。画像を撮りなおしてください。\")\n sys.exit()\n\n # 検出した顔に印を付ける\n tracking(img, face_list, color = (0, 0, 225))\n\n # 検出した目に印を付ける\n tracking(img, eye_list, color = (0, 225, 0))\n\n # 検出した口に印を付ける\n tracking(img, mouth_list, color = (255, 0, 0))\n\n # 検出した鼻に印を付ける\n tracking(img, nose_list, color = (255, 255, 255))\n\n # フレーム表示\n cv2.imshow(\"tracking\", img)\n\n cv2.waitKey(0)\n\n # 終了処理\n cv2.destroyAllWindows()\n\n","repo_name":"RyotaYokouchi/lab_dev","sub_path":"lab/lab/image_tracking.py","file_name":"image_tracking.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"13897535831","text":"from PIL import Image\nimport numpy as np\nimport os\nimport math\n\ndef save_arr_as_img_l(arr, pic_name, file_name, extension):\n img = Image.fromarray(arr).convert(mode='L')\n img.save(file_name + '_' + pic_name + extension)\n\n\ndef save_arr_as_img_rgb(img, pic_name, file_name, extension):\n print(file_name + '_' + pic_name + extension)\n img.save(file_name + '_' + pic_name + extension)\n\n\ndef apply_threshold(value):\n \"Returns 0 or 255 depending where value is closer\"\n return 255 * math.floor(value/128)\n\ndef fs_l(file_name, new_img=None):\n if not new_img:\n new_img = Image.open(file_name)\n new_img = new_img.convert('L')\n pixel = new_img.load() \n x_lim, y_lim = new_img.size\n\n for y in range(y_lim):\n for x in range(x_lim):\n oldpixel = pixel[x, y]\n newpixel = apply_threshold(oldpixel)\n pixel[x, y] = newpixel \n quant_error = oldpixel - newpixel\n \n if x < x_lim - 1:\n pixel[x + 1, y] += round(quant_error * 7 / 16)\n\n if x > 1 and y < y_lim - 1:\n pixel[x - 1, y + 1] += round(quant_error * 3 / 16)\n\n if y < y_lim - 1:\n pixel[x, y + 1] += round(quant_error * 5 / 16)\n\n if x < x_lim - 1 and y < y_lim - 1:\n pixel[x + 1, y + 1] += round(quant_error * 1 / 16)\n \n return new_img\n\n\ndef fs_rgb(file_name):\n new_img = Image.open(file_name)\n new_img = new_img.convert('RGB')\n imgs = new_img.split()\n out = []\n for img in imgs:\n out.append(fs_l(file_name, new_img=img))\n return Image.merge('RGB', out)\n\n\ndef floyd_steinberg(file_name, mode='RGB'):\n print(file_name, mode)\n if mode == 'L':\n return fs_l(file_name)\n else:\n return fs_rgb(file_name)\n\n\ndef bayer_matrix(n, transposed=False):\n return np.array((1 + index_matrix(n, transposed)) / (1 + (n * n)))\n\n\ndef index_matrix(n, transposed=False):\n if n == 2:\n if transposed:\n return np.array([[0, 3], [2, 1]], 'int')\n else:\n return np.array([[0, 2], [3, 1]], 'int')\n else:\n smaller = index_matrix(n >> 1, transposed)\n if transposed:\n return np.bmat([[4 * smaller, 4 * smaller + 3],\n [4 * smaller + 2, 4 * smaller + 1]])\n else:\n return np.bmat([[4 * smaller, 4 * smaller + 2],\n [4 * smaller + 3, 4 * smaller + 1]])\n\n\ndef b_l(file_name, order=8, new_img=None):\n if not new_img:\n new_img = Image.open(file_name)\n new_img = new_img.convert('L')\n pixel = new_img.load() \n x_lim, y_lim = new_img.size\n bm = bayer_matrix(order)\n\n for y in range(y_lim):\n for x in range(x_lim):\n c = int(pixel[x, y] + 256/2*(bm[x%order][x%order]-0.5))\n if c >= 128:\n pixel[x, y] = 255\n else:\n pixel[x, y] = 0\n return new_img\n\n\ndef b_rgb(file_name, order=8):\n new_img = Image.open(file_name)\n new_img = new_img.convert('RGB')\n imgs = new_img.split()\n out = []\n for img in imgs:\n out.append(b_l(file_name, new_img=img, order=order))\n return Image.merge('RGB', out)\n\n\ndef bayer(file_name, mode='RGB'):\n print(file_name, mode)\n if mode == 'L':\n return b_l(file_name)\n else:\n return b_rgb(file_name)\n\n\ndef dithering(file_name, mode='RGB', alg='floyd_steinberg'):\n if alg == 'bayer':\n return bayer(file_name, mode=mode)\n else:\n return floyd_steinberg(file_name, mode=mode)\n\n\nfile_name = r'D:/test/n2.jpg'\nfile, ext = os.path.splitext(file_name)\n\narr = dithering(file_name, mode='RGB', alg='floyd_steinberg')\nsave_arr_as_img_rgb(arr, 'fs_rgb', file, ext)","repo_name":"comrados/PythonASR","sub_path":"dithering.py","file_name":"dithering.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42236285020","text":"#Definindo funcoes\n\ndef algoritmo_euclidiano(a,b):\n # alfa*a + beta*b = mdc\n ao = a\n bo = b\n xo = 1\n xi = 0\n yo = 0\n yi = 1\n if (a!=0)and(b!=0):\n q = a//b\n while (b>0):\n # operacoes de divisao\n d = b\n b = a%b\n a = d\n if (b!=0):\n # operacoes entre x e y\n dummyx = xi\n dummyy = yi\n xi = xo - q*xi\n yi = yo - q*yi\n xo = dummyx\n yo = dummyy\n #fim operacoes x e y\n q = a//b\n mdc = (xi*ao)+(yi*bo)\n #O xi eh o alfa e o yi eh o beta!\n return mdc\n#fim Euclidiano\n\ndef grupo_mdc1(n):\n #num eh o numero em si\n vetor = []\n vetor.insert(len(vetor),1)\n i = 2\n while (i ListNode:\n dummy = ListNode(-1)\n cur = dummy\n carry = 0\n while(l1 is not None and l2 is not None):\n sums = l1.val + l2.val + carry\n l1 = l1.next\n l2 = l2.next\n cur.next = ListNode(sums % 10)\n carry = floor(sums / 10)\n cur = cur.next\n \n while(l1):\n sums = carry + l1.val\n carry = floor(sums/10)\n cur.next = ListNode(sums % 10)\n cur = cur.next\n l1 = l1.next\n while(l2):\n sums = carry + l2.val\n carry = floor(sums/10)\n cur.next = ListNode(sums % 10)\n cur = cur.next\n l2 = l2.next\n \n if carry != 0:\n cur.next = ListNode(carry)\n return dummy.next\n","repo_name":"ahsanI1/Leetcode","sub_path":"AddTwoNumbers.py","file_name":"AddTwoNumbers.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"23846511593","text":"import tensorflow as tf\n\n\ndef dense_to_sparse(dense_tensor, sequence_length):\n indices = tf.where(tf.sequence_mask(sequence_length))\n values = tf.gather_nd(dense_tensor, indices)\n shape = tf.shape(dense_tensor, out_type=tf.int64)\n return tf.SparseTensor(indices, values, shape)\n\n\ndef to_float(x):\n return tf.cast(x, tf.float32)\n\n\ndef weights_nonzero(labels):\n return to_float(tf.not_equal(labels, 0))\n\n\ndef ctc_loss(logits, logits_lengths, targets, targets_lengths, weights_fn=weights_nonzero):\n with tf.name_scope('ctc_loss', values=[logits, targets]):\n logits = tf.transpose(logits, [1, 0, 2])\n\n xent = tf.nn.ctc_loss(\n labels=dense_to_sparse(targets, targets_lengths),\n inputs=logits,\n sequence_length=logits_lengths,\n ignore_longer_outputs_than_inputs=True,\n )\n weights = weights_fn(targets)\n return tf.reduce_mean(xent), tf.reduce_sum(xent), tf.reduce_sum(weights)\n\n\ndef keras_ctc_loss(\n logits, logits_lengths, targets, weights_fn=weights_nonzero\n):\n logits = tf.nn.softmax(logits)\n targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))\n targets_lengths = tf.reduce_sum(targets_mask, axis=1)\n xent = tf.keras.backend.ctc_batch_cost(\n targets,\n logits,\n tf.expand_dims(logits_lengths, -1),\n tf.expand_dims(targets_lengths, -1),\n )\n weights = weights_fn(targets)\n return tf.reduce_mean(xent), tf.reduce_sum(xent), tf.reduce_sum(weights)\n","repo_name":"mesolitica/malaya-speech","sub_path":"malaya_speech/train/model/ctc/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"27"}
+{"seq_id":"9444208734","text":"#Name: Terry Su\r\n#Date: March 18, 2021\r\n#Purpose: CCC 2021 senior questions\r\n\r\n#Q1\r\nfences = int(input())\r\nheights= [int(x) for x in input().split(' ')]\r\nwidths = [int(x) for x in input().split(' ')]\r\n\r\narea = 0\r\n\r\nfor x in range(0,fences):\r\n area += ((heights[x] + heights[x+1]) / 2) * widths[x]\r\n\r\nprint(area)\r\n\r\n#Q2(exceeded time limit for last subtask but correct algorithm)\r\n\r\n#set up 2D array and change testues according to each brush\r\n\r\nrow = int(input())\r\ncolumn = int(input())\r\n\r\ngrid = []\r\nfor x in range(0,row):\r\n grid.append(['B' for r in range(0,column)])\r\n\r\n\r\nbrushes = []\r\n\r\nbrush = int(input())\r\nfor x in range(0,brush):\r\n b = input()\r\n brushes.append(b)\r\n\r\n\r\n#solve\r\nfor stroke in brushes:\r\n direction = stroke[0:1]\r\n specif = int(stroke[2:]) - 1\r\n\r\n if direction == 'R':\r\n for index,color in enumerate(grid[specif]):\r\n if color == 'B':\r\n grid[specif][index] = 'G'\r\n else:\r\n grid[specif][index] = 'B'\r\n\r\n else:\r\n for x in range(0,row):\r\n if grid[x][specif] == 'B':\r\n grid[x][specif] = 'G'\r\n\r\n elif grid[x][specif] == 'G':\r\n grid[x][specif] = 'B'\r\n \r\ncount = 0\r\nfor x in grid:\r\n for y in x:\r\n if y == 'G':\r\n count += 1\r\n\r\nprint(count)\r\n\r\n#Q3 (exceeded time limit for 3 subtasks but correct algorithm)\r\n\r\n#move the concert across each possible spot along the number line\r\n#for each spot, calculate the total walking time of all friends together\r\n\r\nN = int(input())\r\nstats = []\r\ntimes = []\r\nmax_pos = 0\r\nmin_pos = 1000000 \r\n\r\nfor x in range(0,N):\r\n \r\n stat = input()\r\n stats.append(stat.split())\r\n \r\n max_pos = max(max_pos, int(stat.split()[0]))\r\n min_pos = min(min_pos, int(stat.split()[0]))\r\n\r\ndef calc_time(c): #calculating total time for each position of concert\r\n global times\r\n global stats\r\n\r\n time = 0\r\n\r\n for friend in stats:\r\n \r\n friend[0] = int(friend[0])\r\n friend[1] = int(friend[1])\r\n friend[2] = int(friend[2])\r\n \r\n if friend[0] > c:\r\n if friend[0] - c > friend[2]:\r\n time += (friend[0] - c - friend[2]) * friend[1]\r\n\r\n elif friend[0] < c:\r\n if c - friend[0] > friend[2]:\r\n time += (c - friend[0] - friend[2]) * friend[1]\r\n\r\n times.append(time)\r\n \r\n\r\n \r\nif max_pos == min_pos:\r\n print(0)\r\n\r\nelse:\r\n for c in range(min_pos, max_pos + 1):\r\n calc_time(c)\r\n \r\n print(min(times)) #return minimum total time\r\n\r\n#Q4 (exceeded time limit for 3 subtasks but correct algorithm)\r\n\r\n#N stations\r\n\r\n#one test only that runs in station order S1,S2...SN (you want to get to N ASAP) represented by integers\r\n#The order is not necessarily 1,2,3,4... 
tho\r\n#test runs 1 min between stations\r\n\r\n#Theres also walkways from station Ai to Bi (takes 1 min to get there)\r\n\r\n#Route swaps Xith and Yith station everyday tho, ex: station order was [1,3,4]; X = 1, Y = 3 --> [4,3,1]\r\n\r\n#find minimum time to get to Sn (school) everyday\r\n\r\n#Collecting input\r\nN_W_D = [int(x) for x in input().split()]\r\n\r\nWalkways = {} #use hash table to match each station to array of stations you can get to though walkways\r\nfor x in range(0,N_W_D[1]):\r\n Curr = [int(x) for x in input().split()]\r\n \r\n if Curr[0] in Walkways:\r\n Walkways[Curr[0]].append(Curr[1])\r\n\r\n else:\r\n Walkways[Curr[0]] = [Curr[1]]\r\n\r\n\r\nStation_Order = [int(x) for x in input().split()]\r\n\r\nDaily_Swap = []\r\nfor x in range(0,N_W_D[2]):\r\n Daily_Swap.append([int(x)-1 for x in input().split()]) #indexes to swap instead of station #s\r\n\r\n\r\n#Solving\r\n\r\ndef Day(Day_Number):\r\n \r\n global Station_Order #do the daily flip first\r\n Station_Order[Daily_Swap[Day_Number-1][0]], Station_Order[Daily_Swap[Day_Number-1][1]] = Station_Order[Daily_Swap[Day_Number-1][1]], Station_Order[Daily_Swap[Day_Number-1][0]]\r\n \r\n Times = [] #keeps track of all possible times to get to station N\r\n \r\n Max_Time = Station_Order.index(N_W_D[0]) #maximum time to get there would be a direct bus, once all route possbilities up to that time are calculated,\r\n #break the calculations \r\n Times.append(Max_Time)\r\n\r\n def Branch(Time, Station): #Branching walkways through recursion\r\n \r\n for Next in Walkways[Station]: #if station N is reached, add to array of possible times\r\n if Next == N_W_D[0]:\r\n Times.append(Time+1)\r\n \r\n if Next in Walkways and Time < min(Times): #if a minimum time has been found, theres no point to compute further possibilities\r\n Branch(Time + 1, Next) #Otherwise, keep branching possibilities\r\n\r\n Time = 0\r\n for Curr in Station_Order: #At each station check if there are walkways, otherwise keep riding the test\r\n \r\n if Time >= min(Times): #if a minimum time has been found, theres no point to compute further possibilities\r\n break\r\n\r\n if Curr in Walkways:\r\n Branch(Time, Curr)\r\n\r\n Time += 1\r\n\r\n return(min(Times))\r\n\r\nfor x in range(1,N_W_D[2] + 1): #Calculations/answer for each day\r\n ans = Day(x)\r\n print(ans)\r\n\r\n#Q5\r\n","repo_name":"terrysu64/Python-Projects","sub_path":"CCC senior contests/2021 S.py","file_name":"2021 S.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"2107346614","text":"__author__ = 'DarkStar1'\n\nimport fileinput, glob, os, re, shutil, sys, urllib\nfrom bs4 import BeautifulSoup\n\ndef encodeImgSrc(file, encodedFiles):\n #Wanted to use the lxml lib but for some reason it was only finding 1 result within the test file.\n soup = BeautifulSoup(file.read(), \"html5lib\")\n for img in soup.find_all('img'):\n # print(\"Found an image: \" + img['src'])\n #create a base64 link from a dictionary containing the encoded pngs\n img['src'] = base64Prefix + encodedFiles[img['src']]\n\n file.seek(0)\n file.write(str(soup))\n file.truncate()\n\n#holds a map of the png file and it's base64 encoding in the form of {\"xx.png\":\"hsbudbud...\"}\nencodedFiles = {}\n#the string prefix to base64 encoded images\nbase64Prefix = \"data:image/png;base64,\"\n\n#The pdf file to convert\nSOURCE_FILE = os.path.abspath(sys.argv[1])\n\n# change to the source file directory since we can't be sure alfresco would\nos.chdir(os.path.split(SOURCE_FILE)[0])\n\n# First call poppler's pdftohtml to convert the file from pdf to html\nos.system(\"pdftohtml -s -c \" + SOURCE_FILE )\n\n#The pervious call adds a -html to the result of the conversion so we need to create an interim file name\n# that takes this into account\nINTERIM_FILE = os.path.splitext(SOURCE_FILE)[0]\nINTERIM_FILE += \"-html.html\"\n\n#The string is usually escaped so we need to remove the '\\' from the string\nINTERIM_FILE = INTERIM_FILE.replace(\"\\ \", \" \")\n# print(\"\\n\\nThe INTERIM FILE name is now: \" + INTERIM_FILE+\"\\n\\n\")\n\n#Look in the current directory and base 64 encode all png files into a map with the original src values as keys\n#(Extremely expensive. I know)\nimage_list = glob.glob(\"*.png\")\nif len(image_list) > 0:\n for file in image_list:\n with open(file, 'rb') as fh:\n #add to the map\n encodedFiles[file] = fh.read().encode('base64').replace('\\n', '')\n\n# Look for the and replace the urlencoded string in the html file\nHTMLFILE = open(os.path.abspath(INTERIM_FILE), 'r+')\nencodeImgSrc(HTMLFILE, encodedFiles)\nHTMLFILE.close()\n\n#For alfresco, change the name of the interim file to the target filename *argv[2]\nshutil.move(INTERIM_FILE, sys.argv[2])\n\n#clear the dictionary/map for sake of memory issues\nencodedFiles.clear()","repo_name":"magenta-aps/htmlthumbnail","sub_path":"src/main/resources/alfresco/extension/scripts/python/pdfToHtml.py","file_name":"pdfToHtml.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"33681154284","text":"def merge_sort(list):\n # split list into two sublists, sort those, then merge them\n # merge sublists by iterating through both lists\n if len(list) > 1:\n mid = len(list) // 2\n l = list[:mid]\n r = list[mid:]\n merge_sort(l)\n merge_sort(r)\n\n # merge\n # i tracks left\n # j track right\n # k tracks main array\n i = j = k = 0\n\n while i < len(l) and j < len(r):\n # l smaller\n if l[i] < r[j]:\n list[k] = l[i]\n i += 1\n # r smaller\n else:\n list[k] = r[j]\n j += 1\n k += 1\n\n while i < len(l):\n # l array leftover\n list[k] = l[i]\n i += 1\n k += 1\n\n while j < len(r):\n # r array leftover\n list[k] = r[j]\n j += 1\n k += 1\n\n\nlist = [5, 2, 4, 1, 3]\nmerge_sort(list)\nprint(list)\n","repo_name":"josephhlwang/DataStructsAlgo","sub_path":"Sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36605606616","text":"import winreg\r\nkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, \"Software\\\\ThisIsMyCompany\")\r\nwinreg.SetValue(key, \"MyFolderLike\", winreg.REG_SZ, \"It's Subkey\")\r\nwinreg.SetValueEx(key, \"JustValueName\", 0 , winreg.REG_SZ, \"My Value\")\r\nkey.Close()\r\n\r\nkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER,\"Software\\\\ThisIsMyCompany\", 0, winreg.KEY_ALL_ACCESS)\r\nprint(\"Значение подключа или что-то типа папки:\", winreg.QueryValue(key,\"MyFolderLike\"))\r\nprint(\"\\nЗначение переменной или что-то типа того:\", winreg.QueryValueEx(key,\"JustValueName\")[0])\r\nkey.Close()\r\n\r\nprint()\r\ni = 0\r\nwhile True:\r\n try:\r\n print(winreg.EnumKey(winreg.HKEY_CURRENT_USER, i))\r\n except:\r\n print(\"//done//\")\r\n break\r\n i += 1\r\n\r\ntry:\r\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Software\\\\Valve\\\\Steam\", 0, winreg.KEY_ALL_ACCESS)\r\n print(\"\\nЗначение из программы Steam - последний использованный никнейм:\", winreg.QueryValueEx(key, \"LastGameNameUsed\")[0])\r\n key.Close()\r\nexcept BaseException:\r\n print(\"Неудалось вернуть значение из программы Steam\")\r\n\r\ninput(\"\\nДля продолжения нажмите любую клавишу\")\r\nwinreg.DeleteKey(winreg.HKEY_CURRENT_USER, \"Software\\\\ThisIsMyCompany\\\\MyFolderLike\")\r\nwinreg.DeleteKey(winreg.HKEY_CURRENT_USER, \"Software\\\\ThisIsMyCompany\")","repo_name":"Giro5/ModulWinreg","sub_path":"PythonApplication1/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12918147265","text":"#sum of primes below n\n\n\n\ndef isPrime(num):\n\tif(num == 1 or num == 0): return False\n\n\tfor x in range (2,num):\n\t\tif num%x == 0: return False\n\n\treturn True\n\ndef primesUpTo2(n):\n\tglobal prime\n\ttotal = 0\n\tfor num in range (2,n):\n\t\tif num not in prime:\n\t\t\tif(isPrime(num)):\n\t\t\t\tprime[num] = True\n\t\t\t\tmultiply = 2\n\t\t\t\tcomposite = num * multiply\n\t\t\t\twhile composite < n:\n\t\t\t\t\tprime[composite] = False\n\t\t\t\t\tmultiply += 1\n\t\t\t\t\tcomposite = num * multiply\n\t\t\t\t\t#print(composite)\n\n\n\n\t\t\t#listOfPrimes.append(num)\n\t\t\tprint(num)\n\t\t\ttotal += num\n\treturn total\n\ndef primesUpTo(n):\n\tnumbers = set(range(n, 1, -1))\n\n\tprimes = []\n\twhile numbers:\n\t\tp = numbers.pop()\n\t\tprimes.append(p)\n\t\tnumbers.difference_update(set(range(p*2, n+1, p)))\n\treturn primes\n\ndef main():\n\tglobal prime \n\tprime = {}\n\tmax = 2000000\n\n\t#for x in range (0, max):\n\t#\tprime[x] = False\n\n\t#print( primesUpTo(10))\t\n\tlistP = primesUpTo(max)\n\ttotal = 0\n\tfor x in listP:\n\t\ttotal += x\n\tprint (total)\n\n\n\nmain()","repo_name":"hansyuan/projectEuler_hy","sub_path":"p10 - primeSummation/sumOfPrimes.py","file_name":"sumOfPrimes.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"1769108806","text":"from Arac import Arac\r\n\r\n\r\nclass Kamyon(Arac):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.TasimaKapasitesi = \"3 Ton\"\r\n self.YakitDepoSayisi = 1\r\n\r\n\r\n def BilgiYazdir(self):\r\n super().BilgiYazdir()\r\n print(f\"\"\"\r\n Taşıma Kapasitesi : {self.TasimaKapasitesi}\r\n Yakıt Depo Sayısı : {self.YakitDepoSayisi}\r\n \"\"\")\r\n","repo_name":"mertkama/Python1922","sub_path":"Kamyon.py","file_name":"Kamyon.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"29694671306","text":"import worker\nimport exceptions\nimport re\n\n\nclass Seller(worker.Worker):\n def __init__(self, firstName, lastName, age, experience, street, buildingNumber, apartmentNumber, city, company,\n effectiveness, provision):\n super().__init__(firstName, lastName, age, experience, street, buildingNumber, apartmentNumber, city, company)\n self.effectiveness = effectiveness\n self.provision = provision\n\n @property\n def effectiveness(self):\n return self.__effectiveness\n\n @effectiveness.setter\n def effectiveness(self, p):\n if p not in ('low', 'mid', 'high'):\n raise exceptions.EffectivenessMustBeLowOrMidOrHigh\n else:\n self.__effectiveness = p\n\n @property\n def provision(self):\n return self.__provision\n\n @provision.setter\n def provision(self, p):\n try:\n if int(p) < 0 or int(p) > 100:\n raise exceptions.ProvisionMustBeBetween0And100\n else:\n self.__provision = p\n except ValueError:\n raise exceptions.ProvisionMustBeANumber\n","repo_name":"alannadolny/test_automation-python","sub_path":"003_pracownicy/seller.py","file_name":"seller.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"13893449904","text":"from pymongo import MongoClient\n\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client.exp_database\npoints = db.obs\ndiff = db.dEdNdU\ndiffk = db.dEdNdUK\navgS = db.AVGS\navgK = db.AVGK\nrmsdb = db.RMS\nrmsdbk = db.RMSK\n\ndef MongoDriverPost(collection,post):\n collection.insert_one(post).inserted_id\n\ndef MongoDriverGet(collection,post):\n for point in collection.find(post):\n return point","repo_name":"SergeyDolin/EstExp","sub_path":"mongodriver.py","file_name":"mongodriver.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"29622939585","text":"import pygame, sys, time, os\n\n\n# The objects involved can be categorized into three classes: \n# Buttons: the objects used to navigate the menu \nclass Button:\n def __init__(self, image, x, y):\n self.image = pygame.image.load(image).convert()\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n\n\n# Columns: the stationery objects which the Discs are moving toward\nclass Column:\n def __init__(self, x):\n self.image = pygame.Surface((20, 140))\n self.rect = self.image.get_rect()\n self.rect.center = (x, 150)\n self.x = x\n\n\n# Discs: objects moving aroung the columns to eventually land on the last column \nclass Disc:\n def __init__(self, size, color):\n self.image = pygame.Surface((size, 20))\n self.image.fill(color)\n self.rect = self.image.get_rect()\n self.isup = 0\n self.column = 1\n\n\n# Check if the disc needs to be moved after it has been moved to the new column\ndef check_for_disc_collision(disc_list, column, n, y):\n for j in range(n):\n if disc_list[i] != disc_list[j] and disc_list[i].rect.colliderect(\n disc_list[j]) and i < j: # If the disc collides with a bigger disc in the new column:\n y = y - 20 # Move the disc up 20 pixel (The size of a disc)\n elif disc_list[i] != disc_list[j] and disc_list[i].rect.colliderect(\n disc_list[j]) and i > j: # If the disc collides with a smaller disc in the new column:\n return 0 # Break out of the function\n disc_list[i].rect.center = (column.x, y) # Change the disc's location\n return y\n\n\n# Move the disc to the column then check if it needs to be moved\ndef move_disc_to_column(list, n, i, column):\n x_list, y_list = list[i].rect.center # Save the original location of the disc\n list[i].rect.center = (column.x, 210) # Move the disc to the desired column\n _, y = list[i].rect.center # Take the height of the disc for use in the function above\n for _ in range(n): # The collision check must be done n times to ensure that the collision check is finished\n y = check_for_disc_collision(disc_list, column, n, y)\n if y == 0: # If the disc we are moving touch a smaller disc in the column\n list[i].rect.center = (x_list, y_list) # Return the disc to its original position\n break # End the function\n\n\n# Check the event queue for important events\ndef check_event():\n location = (-1, -1) # Reset the location of the mouse click\n for events in pygame.event.get():\n if events.type == pygame.QUIT:\n sys.exit() # Exit the program\n if events.type == pygame.MOUSEBUTTONDOWN:\n location = pygame.mouse.get_pos() # Get the location of the mouse click\n return location\n\n\n# Create the colors needed to fill the discs\ndisc_color = [(230, 108, 128), (242, 209, 88), (62, 203, 222), (136, 216, 176), (189, 155, 197)]\n\n# Create black and white pixels\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\n# Create a screen and name it\nsize = width, height = 720, 300\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption('Tower of Hanoi')\n\n# Create the three columns\ncolumn1 = Column(120)\ncolumn2 = Column(360)\ncolumn3 = Column(600)\n\ncurrent_path = os.path.join(os.getcwd(),'images')\n# Create the screens\nstart = pygame.image.load(os.path.join(current_path,'start menu.png')).convert()\nchoice = pygame.image.load(os.path.join(current_path,'choice menu.png')).convert()\nend = pygame.image.load(os.path.join(current_path,'end screen.png')).convert()\nbackground = pygame.image.load(os.path.join(current_path,'main background.png')).convert()\n\n# Create the buttons to control the game\nstart_button = 
Button(os.path.join(current_path,'start button.png'), 418, 130)\nexit_button = Button(os.path.join(current_path,'exit button.png'), 418, 209)\nthree_button = Button(os.path.join(current_path,'3 button.png'), 194, 105)\nfour_button = Button(os.path.join(current_path,'4 button.png'), 320, 105)\nfive_button = Button(os.path.join(current_path,'5 button.png'), 446, 105)\nrestart_button = Button(os.path.join(current_path,'restart button.png'), 636, 269)\nplay_again_button = Button(os.path.join(current_path,'play again button.png'), 235, 143)\nmain_menu_button = Button(os.path.join(current_path,'main menu button.png'), 0, 269)\n\n# Make the black borders transparent\nrestart_button.image.set_colorkey(black)\nmain_menu_button.image.set_colorkey(black)\n\nwhile 1:\n\n click_location = check_event()\n\n # Draw everything to the screen\n screen.blit(start, (0, 0))\n screen.blit(start_button.image, start_button.rect)\n screen.blit(exit_button.image, exit_button.rect)\n\n # Exit the program if \"Exit\" is pressed\n if exit_button.rect.collidepoint(click_location):\n sys.exit()\n\n # Continue to the next loop if \"Start\" is pressed\n if start_button.rect.collidepoint(click_location):\n break\n\n # Update the screen to show the images\n pygame.display.update()\n pygame.time.Clock().tick(60)\n\nwhile 1:\n\n click_location = check_event()\n\n # Draw everything to the screen\n screen.blit(choice, (0, 0))\n screen.blit(three_button.image, three_button.rect)\n screen.blit(four_button.image, four_button.rect)\n screen.blit(five_button.image, five_button.rect)\n\n # Get the selection and exit the loop\n if three_button.rect.collidepoint(click_location):\n n = 3\n break\n elif four_button.rect.collidepoint(click_location):\n n = 4\n break\n elif five_button.rect.collidepoint(click_location):\n n = 5\n break\n\n # Update the screen\n pygame.display.update()\n pygame.time.Clock().tick(60)\n\n# Create the discs\ndisc_list = []\npoint_1 = (170 - (n - 3) * 20)\nfor i in range(1, n + 1):\n disc_list.append(Disc(i * 40, disc_color[i - 1]))\n disc_list[i - 1].rect.center = (120, point_1)\n point_1 = point_1 + 20\n\nable_to_move_up = 1\n\nwhile 1:\n\n click_location = check_event()\n\n # Draw everything to the screen\n screen.blit(background, (0, 0))\n screen.blit(column1.image, column1.rect)\n screen.blit(column2.image, column2.rect)\n screen.blit(column3.image, column3.rect)\n screen.blit(restart_button.image, restart_button.rect)\n screen.blit(main_menu_button.image, main_menu_button.rect)\n\n # If \"Main menu\" button is pressed, execute the file again to return to the start menu\n if main_menu_button.rect.collidepoint(click_location):\n execfile('Tower of Hanoi.py')\n\n # Check for click on the restart button\n if restart_button.rect.collidepoint(click_location):\n point_1 = 170 - (n - 3) * 20\n\n # Reset everything to their original position\n for i in range(n):\n disc_list[i].rect.center = (120, point_1)\n point_1 = point_1 + 20\n\n # Check if a disc is up\n for i in range(n):\n if disc_list[i].isup == 1:\n able_to_move_up = 0 # Prevent any disc from being moved up\n\n # Check for click on a disc and move that disc up if possible\n if able_to_move_up:\n for i in range(n):\n if disc_list[i].rect.collidepoint(click_location):\n\n # Check if the disc is on top of the column\n x_disc, y_disc = disc_list[i].rect.center # Save the original location of the disc\n no_up = 0\n for j in range(n):\n disc_list[i].rect.center = (x_disc, y_disc - 20)\n if disc_list[i] != disc_list[j] and disc_list[i].rect.colliderect(\n 
disc_list[j]): # If the disc touch another disc:\n no_up = 1 # It can not be moved up\n\n # Do not move the disc if it is not on top of the column\n if no_up == 1:\n disc_list[i].rect.center = (x_disc, y_disc) # Return the disc to its original position\n\n # Move the disc up otherwise\n else:\n disc_list[i].rect.center = (x_disc, 50)\n disc_list[i].isup = 1\n\n for i in range(n):\n # Execute when a disc is up\n if disc_list[i].isup:\n\n click_location = check_event()\n\n if click_location != (-1, -1):\n\n # Check click and move disc to column 1\n if column1.rect.collidepoint(click_location):\n move_disc_to_column(disc_list, n, i, column1)\n\n # Check click and move disc to column 2\n elif column2.rect.collidepoint(click_location):\n move_disc_to_column(disc_list, n, i, column2)\n\n # Check click and move disc to column 3\n elif column3.rect.collidepoint(click_location):\n move_disc_to_column(disc_list, n, i, column3)\n\n # Check if the disc is up\n _, y_disc = disc_list[i].rect.center\n if y_disc > 50:\n disc_list[i].isup = 0\n able_to_move_up = 1\n\n # Draw every disc to the screen\n for i in range(n):\n screen.blit(disc_list[i].image, disc_list[i].rect)\n\n # Update the screen\n pygame.display.update()\n\n # Check if the discs are in the winning positions\n point_1, score = 210, 0\n for i in range(n - 1, -1, -1):\n if disc_list[i].rect.center == (600, point_1):\n score += 1\n point_1 -= 20\n\n # Check if the game is won\n if score == n:\n time.sleep(2)\n break\n\n # Reset the counters\n else:\n point_1, score = 170 - (n - 3) * 20, 0\n pygame.time.Clock().tick(60)\n\nwhile 1:\n\n click_location = check_event()\n\n # Draw everything to the screen\n screen.blit(end, (0, 0))\n screen.blit(play_again_button.image, play_again_button.rect)\n\n # Check if replay button is pressed and open the game again\n if play_again_button.rect.collidepoint(click_location):\n execfile('Tower of Hanoi.py')\n\n # Update the screen\n pygame.display.update()\n pygame.time.Clock().tick(60)\n","repo_name":"KienLe1609/tower-of-hanoi","sub_path":"Tower of Hanoi.py","file_name":"Tower of Hanoi.py","file_ext":"py","file_size_in_byte":9999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"40136729450","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 24 12:28:10 2023\n\n@author: COnnor.gibbs\n\"\"\"\n\nfrom time import strptime\nimport re\ndef is_valid_date(string):\n \"\"\"Checks if a string is a valid date\"\"\"\n try:\n strptime(string, '%m/%d/%Y')\n return True\n except ValueError:\n return False\n\ndef is_ssn(string):\n \"\"\"Checks if string is a valid social security number\"\"\"\n pattern = r'^\\d{9}$' # Regex pattern for exactly nine digits\n return bool(re.match(pattern, string))\n\ndef remove_empty_words(lines):\n \"\"\"Removes empty words from each line in lines.\"\"\"\n lines = [[elem for elem in inner_list if elem != ''] for inner_list in lines]\n return lines\n\ndef remove_string(lines, string):\n \"\"\"Recursively removes a string from each line in lines.\"\"\"\n for inner_list in lines:\n for i in range(len(inner_list)):\n inner_list[i] = inner_list[i].replace(string, '')\n return lines\n\ndef remove_lines_with_string(lines, strings):\n \"\"\"Removes line in lines if line contains any string in strings.\"\"\"\n strings_set = set(strings)\n lines = [line for line in lines if not any(string in ' '.join(line) for string in strings_set)]\n return lines\n\ndef line_contains_string(lines, strings):\n \"\"\"Returns true if any of the strings are in the line for each line or false otherwise.\"\"\"\n strings_set = set(strings)\n result = []\n for line in lines:\n found = False\n for string in strings_set:\n if string in ' '.join(line):\n found = True\n break\n result.append(found)\n return result","repo_name":"ConGibbs10/OCR","sub_path":"py/StringValidation.py","file_name":"StringValidation.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"5909562091","text":"import json\r\n\r\ndef json_reader(json_file = \"history_mobilenet.json\"):\r\n\r\n jf = open(json_file)\r\n\r\n history = json.load(jf)\r\n jf.close()\r\n\r\n print(history.keys())\r\n # print(history['val_loss'])\r\n for key in history.keys():\r\n save_name = \"{}.csv\".format(key)\r\n data = []\r\n for i, val in enumerate(history[key]):\r\n\r\n # print(i+1,',', val, sep='')\r\n line = \"{},{}\".format(i,val)\r\n data.append(line+\"\\n\")\r\n with open(save_name, \"w\") as outfile:\r\n outfile.writelines(data)\r\n\r\n\r\n\r\njson_reader()\r\n\r\n\r\n","repo_name":"hangwudy/utils","sub_path":"json_reader.py","file_name":"json_reader.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"17360851835","text":"from vector import vector, vector3\nfrom math import sin as sine, cos as cosine, tan, radians, inf\n\n\n\nclass camera3:\n def __init__(self, pos: vector3, resolution: vector, fov = 90):\n self.pos = pos\n self.rotation = vector3(0, 0, 0)\n self.offset = vector(resolution.x, resolution.y)\n self.depth = 1 / tan(radians(fov) / 2) * (resolution.x + resolution.y) * 0.5\n self.updateNormal()\n\n def projection(self, point: vector3) -> vector:\n # function for projecting a point in 3d onto the screen\n difference = self.pos - point\n\n sin = vector3(sine(self.rotation.x), sine(self.rotation.y), sine(self.rotation.z))\n cos = vector3(cosine(self.rotation.x), cosine(self.rotation.y), cosine(self.rotation.z))\n\n cameraTransform = vector3(\n cos.y * (sin.z * difference.y + cos.z * difference.x) - sin.y * difference.z,\n sin.x * (cos.y * difference.z + sin.y * (sin.z * difference.y + cos.z * difference.x)) + cos.x * (cos.z * difference.y - sin.z * difference.x),\n cos.x * (cos.y * difference.z + sin.y * (sin.z * difference.y + cos.z * difference.x)) - sin.x * (cos.z * difference.y - sin.z * difference.x)\n )\n\n return (vector(cameraTransform.x, cameraTransform.y) * self.depth / cameraTransform.z).flipY() + self.offset\n\n def updateNormal(self):\n # updates the directin that the camera is facing\n self.normal = vector3.pointOnSphere(self.rotation)\n\n def rotate(self, rot: vector3, dt: float, speed: float):\n # rotate the camera\n self.rotation += rot * (dt * speed * 0.001)\n self.rotation = self.rotation.clamp(vector3(-1.5708, -inf, -inf), vector3(1.5708, inf, inf))\n self.updateNormal()\n\n def move(self, movement: vector3):\n # move the camera\n self.pos += movement","repo_name":"j0e133/3d_Graphics","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"70211266951","text":"# -*- encoding: utf-8 -*-\nfrom pytest import raises\nfrom chomsky import *\n\n\nmatchers = [\n Chars('aeiou'),\n W('aeiou'),\n ]\n\n\ndef test_chars_repr():\n assert repr(Chars('aeiou')) == \"Chars('aeiou')\"\n assert repr(Chars('aeiou', min=1)) == \"Chars('aeiou')\"\n assert repr(Chars('aeiou', min=2)) == \"Chars('aeiou', min=2)\"\n assert repr(Chars('aeiou', max=2)) == \"Chars('aeiou', max=2)\"\n assert repr(Chars('aeiou', min=2, max=3)) == \"Chars('aeiou', min=2, max=3)\"\n assert repr(Chars('aeiou', suppress=False)) == \"Chars('aeiou')\"\n assert repr(Chars('aeiou', suppress=True)) == \"Chars('aeiou', suppress=True)\"\n assert repr(Chars('aeiou', inverse=True)) == \"Chars('aeiou', inverse=True)\"\n assert repr(W('aeiou')) == \"Chars('aeiou')\"\n assert repr(W('あいうえお')) == \"Chars('あいうえお')\"\n\n\ndef test_chars_matcher():\n parse = 'a ae aei aeio aeiou'.split(' ')\n for matcher in matchers:\n for p in parse:\n parsed = matcher(p)\n assert parsed == p\n\n\ndef test_chars_any_matcher():\n parse = 'a ae aei aeio aeiou aeiouz'.split(' ')\n matcher = Chars()\n for p in parse:\n parsed = matcher(p)\n assert parsed == p\n\n\ndef test_chars_matcher_unicode():\n parse = 'あ あい あいう あいうえ あいうえお'.split(' ')\n for p in parse:\n parsed = Chars('あいうえお')(p)\n assert parsed == p\n\n\ndef test_inverse_word_matcher():\n parse = 'b bc bc! bc!: bc!:-'.split(' ')\n matcher = Chars('aeiou', inverse=True)\n for p in parse:\n parsed = matcher(p)\n assert parsed == p\n parsed = matcher('bc!a')\n assert parsed == 'bc!'\n\n\ndef test_chars_matcher_fail():\n parse = 'b bc bad'.split(' ')\n for matcher in matchers:\n for p in parse:\n with raises(ParseException):\n matcher(p)\n","repo_name":"colinta/chomsky","sub_path":"test/matchers/test_chars_matcher.py","file_name":"test_chars_matcher.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"}
+{"seq_id":"30697242189","text":"# Import and variables\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom flask import Flask, render_template, request, redirect, url_for, send_from_directory\nfrom PIL import Image\nfrom tensorflow.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.keras.preprocessing import image\n\napp = Flask(__name__)\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\napp.config['UPLOAD_FOLDER'] = \"uploads\"\napp.config['STATIC_FOLDER'] = \"static\"\napp.config['HOST'] = '0.0.0.0'\napp.config['PORT'] = 80\napp.config['IS_DEBUG'] = True\nsizeofimage = 256\n\n# Load model\ncauliflower_model = tf.keras.models.load_model(app.config['STATIC_FOLDER'] + \"/model/Cauliflower_best_model.h5\")\n\n# Preprocess an image\ndef preprocess(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [sizeofimage, sizeofimage])\n image /= 255.0 # normalize\n return image\n\n# Read the image from path\ndef load(path):\n image = tf.io.read_file(path)\n return preprocess(image)\n\n# Add a pre-screenr to catch image that doesn't belong to the class\n# Load pre-trained ResNet50 model\npre_trained_model = tf.keras.applications.ResNet50(weights='imagenet')\n\n# Define a function to preprocess the image and predict the class using the pre-trained model\ndef model_predict_image(image_path):\n img = image.load_img(image_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = pre_trained_model.predict(x)\n return tf.keras.applications.resnet50.decode_predictions(preds, top=1)[0][0][1]\n\n# Predict & classify image\ndef model_classify(model, image_path):\n # Add a pre-screener to catch images that are not cauliflower or do not belong to the same category\n if model_predict_image(image_path) != \"cauliflower\":\n return \"Not a cauliflower image\", 0.0\n\n finalimage = load(image_path)\n finalimage = tf.reshape(finalimage, (1, sizeofimage, sizeofimage, 3))\n\n probability = cauliflower_model.predict(finalimage)\n label = \"Healthy\" if probability[0][0] <= 0.5 \\\n else \"Diseased\"\n\n classified_probability = probability[0][0] \\\n if probability[0][0] >= 0.5 \\\n else 1 - probability[0][0]\n\n return label, classified_probability\n\n# Index landing page\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n# About page\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n# Contact page\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n# Feedback page\n@app.route(\"/feedback\")\ndef feedback():\n return redirect(\"https://forms.office.com/r/FbhU0KprGJ\")\n\n# Classify page\n@app.route(\"/classify\", methods=[\"POST\", \"GET\"])\ndef predict():\n if request.method == \"POST\":\n file = request.files[\"image\"]\n upload_image_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)\n file.save(upload_image_path)\n label = model_predict_image(upload_image_path)\n label, probability = model_classify(cauliflower_model, upload_image_path)\n probability = round((probability * 100), 2)\n return render_template(\"classified.html\", imagefile=file.filename, label=label, prob=probability)\n else:\n return render_template(\"classify.html\")\n\n@app.route(\"/classify/\")\ndef send_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\nif __name__ == '__main__':\n app.run(host=app.config['HOST'], port=app.config['PORT'], 
debug=app.config['IS_DEBUG'])\n","repo_name":"abi-manoharan97/CheckMyPlant","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"41198762786","text":"\"\"\"\nTranslated in python by Johan Mazoyer from IDL routine\nkowalsky.pro written by Christopher Stark\n\nDeprojects an ellipse using the Kowalsky method as detailed by Smart 1930.\nSmart 1930 derives the following assuming the star/focus is at the origin.\nThe ellipse is assumed to have been centered at (0,0), rotated by an\nangle, then shifted to a new center, i.e. rotation before\ntranslation, NOT THE OTHER WAY AROUND.\n\n\"\"\"\n\nimport numpy as np\nimport math as mt\n\n\ndef kowalsky(a, ecc, pa, E_offset, N_offset):\n\n # INPUT: (Observed/projected ellipse parameters)\n # a = semi-major axis\n # ecc = eccentricity\n # pa = position angle in degrees measured E of N\n # E_offset = center of ellipse in the East direction (+ = Eastward)\n # N_offset = center of ellipse in the North direction (+ = Northward)\n\n # OUTPUT: (Deprojected ellipse parameters)\n # true_a = semi-major axis\n # true_ecc = eccentricity\n # argperi = argument of pericenter, the angle the true ellipse is\n # rotated by prior to inclining (degrees)\n # inc = inclination (degrees)\n # longnode = longitude of the ascending node on the sky measured E of N (degrees)\n\n # The inputs are in terms of PA measured E of N, and E and N offsets.\n # Here we work in x and y coords (x = West, y = North)\n\n dx = - E_offset\n dy = N_offset\n temppa = pa + 90\n\n #Define some variables\n parad = np.radians(temppa)\n cpa = np.cos(parad)\n spa = np.sin(parad)\n cpa2 = cpa*cpa\n spa2 = spa*spa\n oneoa2 = 1./(a*a)\n b = a * np.sqrt(1.- ecc*ecc)\n oneob2 = 1./(b*b)\n\n #The general equation for an ellipse is given by\n # A0 * x**2 + 2 * H0 * x * y + B0 * y**2 + 2 * G0 * x + 2 * F0 * y + 1 = 0\n #For an ellipse rotated CCW by angle phi, then centered at (dx, dy), we have:\n # A*x**2 + 2*H*x*y + B*y**2 - 2*(A*dx+H*dy)*x - 2*(B*dy+H*dx)*y + A*dx**2+2*H*dx*dy+B*dy**2-1 = 0\n #where\n #A = ( cos(phi)**2 / a**2 + sin(phi)**2 / b**2 )\n #B = ( sin(phi)**2 / a**2 + cos(phi)**2 / b**2 )\n #H = cos(phi) * sin(phi) * (1/a**2 - 1/b**2)\n #With that in mind...\n A0 = ( cpa2 * oneoa2 + spa2 * oneob2 )\n B0 = ( spa2 * oneoa2 + cpa2 * oneob2 )\n H0 = cpa * spa * (oneoa2 - oneob2)\n F0 = - (B0 * dy + H0 * dx)\n G0 = - (A0 * dx + H0 * dy)\n f = A0 * dx * dx + 2 * H0 * dx * dy + B0 * dy * dy - 1\n A0 /= f\n B0 /= f\n H0 /= f\n F0 /= f\n G0 /= f\n F02 = F0 * F0\n G02 = G0 * G0\n\n #First we calculate the longitude of the ascending node\n twolongnode = np.arctan2(-2*(F0*G0 - H0) , (F0**2 - G0**2 + A0 - B0)) #big Omega in Smart 1930\n if twolongnode < 0:\n twolongnode += 2*np.pi #long. of asc. 
node is between 0 and 180 according to Smart 1930\n stln = np.sin(twolongnode)\n fgmh = F0*G0-H0\n if (stln/abs(stln)) * (fgmh/abs(fgmh)) > 0: #if they are not opposite signs, add pi\n raise ValueError(\"ERROR in kowalsky.py: 2*longnode must have opposite sign of F0*G0-H0.\")\n longnode = twolongnode * 0.5\n\n #Now for the inclination\n tan2iop2 = (F02 - G02 + A0 - B0) / np.cos(twolongnode) #a quantity we need to calculate temporarily\n p = np.sqrt(2./(F02 + G02 - A0 - B0 - tan2iop2)) #another quantity\n inc = np.arctan(np.sqrt(tan2iop2 * p * p)) #i in Smart 1930 <---this is the abs value of inc\n\n #Now for the argument of periastron, the angle that determines the axis of inclination\n argperi = np.arctan2((G0*np.sin(longnode) - F0*np.cos(longnode)) * np.cos(inc) ,- (G0*np.cos(longnode) + F0*np.sin(longnode))) #little omega in Smart 1930\n\n #Now for the eccentricity\n true_ecc = - p * (G0*np.cos(longnode) + F0*np.sin(longnode)) / np.cos(argperi) #e in Smart 1930\n if true_ecc < 0:\n print('Adding pi to argperi...')\n argperi += np.pi\n true_ecc = abs(true_ecc)\n\n #Finally, the semi-major axis\n true_a = p / (1. - true_ecc*true_ecc) #a in Smart 1930\n\n #Convert angles to degrees\n rad2deg = 180./np.pi\n argperi *= rad2deg\n inc *= rad2deg\n longnode *= rad2deg\n\n longnode += 90. #right now longnode is measured N of W, we add 90 to get E of N\n if longnode > 180:\n longnode -= 180. #limit it to 0 - 180\n\n return true_a, true_ecc, argperi, inc, longnode","repo_name":"johanmazoyer/debrisdisk_mcmc_fit_and_plot","sub_path":"kowalsky.py","file_name":"kowalsky.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
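The deprojection above rests on the general-conic coefficients of a rotated, shifted ellipse. A numerical sanity sketch (assumed values, not part of the record) confirming that a point on such an ellipse satisfies the conic built from exactly those coefficients:

import numpy as np

a, b, phi, dx, dy = 3.0, 2.0, np.radians(40.0), 0.5, -0.25
t = np.radians(70.0)                     # parameter of a point on the ellipse
x0, y0 = a * np.cos(t), b * np.sin(t)    # point before rotation/translation
x = np.cos(phi) * x0 - np.sin(phi) * y0 + dx
y = np.sin(phi) * x0 + np.cos(phi) * y0 + dy

A = np.cos(phi) ** 2 / a ** 2 + np.sin(phi) ** 2 / b ** 2
B = np.sin(phi) ** 2 / a ** 2 + np.cos(phi) ** 2 / b ** 2
H = np.cos(phi) * np.sin(phi) * (1 / a ** 2 - 1 / b ** 2)
G = -(A * dx + H * dy)
F = -(B * dy + H * dx)
f = A * dx ** 2 + 2 * H * dx * dy + B * dy ** 2 - 1
assert abs(A * x ** 2 + 2 * H * x * y + B * y ** 2 + 2 * G * x + 2 * F * y + f) < 1e-12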
+{"seq_id":"9412450594","text":"# server.\r\nfrom imapclient import IMAPClient\r\nimport imaplib\r\nimport calfunctions\r\nimport email\r\nfrom email.header import decode_header\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport time\r\nimport sqlite3\r\nfrom sqlite3 import Error\r\nimport smtplib, ssl\r\nimport emails\r\nme = input(\"user email/number\")\r\nHOST = 'imap.gmail.com'\r\nUSERNAME = input(\"host email\")\r\ninput(\"Username/Email\")\r\nPASSWORD = input(\"password\")\r\n\r\n# authenticate\r\n\r\nserver = IMAPClient(HOST)\r\nserver.login(USERNAME, PASSWORD)\r\nserver.select_folder(\"INBOX\")\r\n# messages = server.search(['FROM', me])\r\n# Start IDLE mode\r\nserver.idle()\r\nprint(\"Connection is now in IDLE mode, send yourself an email or quit with ^c\")\r\nwhile True:\r\n try:\r\n # Wait for up to 30 seconds for an IDLE response\r\n responses = server.idle_check(timeout=5)\r\n print(\"Server sent:\", responses if responses else \"nothing\")\r\n \r\n if responses:\r\n server.idle_done()\r\n \r\n rec = server.fetch([responses[0][0]+1], ['RFC822'])\r\n for uid, msgdata in rec.items():\r\n parsedEmail = msgdata[b'RFC822']\r\n print(uid)\r\n \r\n msg = email.message_from_bytes(parsedEmail)\r\n FROM = decode_header(msg.get(\"From\"))[0][0]\r\n if FROM == me:\r\n for part in msg.walk():\r\n # extract content type of email\r\n try:\r\n body = part.get_payload(decode=True).decode()\r\n \r\n thing = []\r\n soup = BeautifulSoup(body, 'lxml')\r\n row = soup.find(\"pre\")\r\n thing.append(str(row))\r\n \r\n simplebody = thing[0].replace(\"\",\"\").replace(\" \",\"\").replace(\" \",\"\")\r\n start = thing[0].find(\"~\") + len(\"~\")\r\n end = thing[0].find(\"(\")\r\n command = thing[0][start:end]\r\n\r\n start2 = thing[0].find(\"(\") + len(\"(\")\r\n end2 = thing[0].find(\")\")\r\n para_whole = thing[0][start2:end2]\r\n para_split = para_whole.split(\"*\") \r\n print(command)\r\n \r\n if command == \"log\":\r\n calfunctions.log(*para_split)\r\n emails.sendher(emails.logsucc)\r\n if command == \"display\":\r\n calfunctions.display(para_split[0])\r\n \r\n except:\r\n pass\r\n else:\r\n server.delete_messages(responses[0][0])\r\n \r\n \r\n server.idle()\r\n \r\n\r\n \r\n \r\n\r\n \r\n except KeyboardInterrupt:\r\n break\r\n\r\nserver.idle_done()\r\nprint(\"\\nIDLE mode done\")\r\nserver.logout()\r\n","repo_name":"jacobeldridge/Calander-website","sub_path":"clienttest.py","file_name":"clienttest.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"32278873510","text":"def is_Palindrome(n):\r\n m = str(n)\r\n for i in range(0, len(m)):\r\n if m[i] != m[len(m) - 1 - i]:\r\n return False\r\n break\r\n else:\r\n return True\r\n\r\n\r\ndef max(a, b):\r\n if a >= b:\r\n return a\r\n else:\r\n return b\r\n\r\n\r\ni = 999\r\nresult = 0\r\nwhile (i > 99):\r\n j = i\r\n while (j > 99):\r\n if is_Palindrome(i * j):\r\n result = max(result, i*j)\r\n break\r\n else:\r\n j -= 1\r\n i -= 1\r\nprint (result)\r\n","repo_name":"pratyaydeep/Python-programs","sub_path":"ProjectEuler/Problem4_ProjectEuler.py","file_name":"Problem4_ProjectEuler.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"30530642608","text":"import pathlib\nimport re\n\nfrom .multiple import absolute_scans_from_mephysto\n\n\ndef load_mephysto_directory(directory, regex, absolute_doses, normalisation_depth):\n \"\"\"Read and normalise a directory of Mephysto files.\n\n Mephysto files are renormalised at the ``normalistation_depth`` to be\n equal to the values passed within the ``absolute_doses`` dictionary.\n\n Parameters\n ----------\n directory : path like object\n The directory containing the Mephysto files.\n regex : str\n A regex string defined such that ``re.match(regex, mcc_filename).group(1)``\n returns the key used to look up the absolute doses.\n absolute_doses : dict\n A dictionary mapping file keys to absolute doses defined at the\n ``normalisation_depth``.\n normalisation_depth : float\n The normalisation depth at which to apply the absolute doses.\n Can also optionally pass the string ``'dmax'``. This is in mm.\n\n Returns\n -------\n absolute_scans_per_field : dictionary\n A dictionary with the same keys as ``absolute_doses`` with the\n re-normalised depth doses and profiles contained within it.\n \"\"\"\n directory = pathlib.Path(directory)\n\n all_mephysto_files = list(directory.glob(\"*.mcc\"))\n matches = [re.match(regex, filepath.name) for filepath in all_mephysto_files]\n keys = [match.group(1) for match in matches if match]\n mephysto_files = [\n filepath for filepath, match in zip(all_mephysto_files, matches) if match\n ]\n\n if not set(keys).issubset(set(absolute_doses.keys())):\n keys_not_found = set(keys) - set(absolute_doses.keys())\n raise ValueError(\n \"The following keys were not provided within the \"\n f\"`absolute_doses` variable:\\n{keys_not_found}\"\n )\n\n mephysto_file_map = dict(zip(keys, mephysto_files))\n\n absolute_scans_per_field = {\n key: absolute_scans_from_mephysto(\n mephysto_file_map[key], absolute_doses[key], normalisation_depth\n )\n for key in keys\n }\n\n return absolute_scans_per_field\n","repo_name":"pymedphys/pymedphys","sub_path":"lib/pymedphys/_experimental/fileformats/mephysto/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":273,"dataset":"github-code","pt":"27"}
+{"seq_id":"40355076559","text":"# link.py\n\nimport sqlite3\nfrom osrs_api import Hiscores\nimport os\nfrom dotenv import load_dotenv\nimport update\n\nload_dotenv()\n\n# SQL\ndb_path = os.environ.get('DB_PATH')\ndb_conn = sqlite3.connect(db_path)\n\nasync def on_message(message):\n if message.content.startswith(\"!link \"):\n args = message.content.split()\n if len(args) == 2:\n await link_account(message)\n elif len(args) == 3 or len(args) == 4:\n if message.mentions:\n await admin_link_account(message)\n else:\n await link_account(message)\n else:\n await message.channel.send(\"Invalid command. Please use `!link ` or `!link <@user> `\")\n elif message.content.startswith(\"!unlink \"):\n args = message.content.split()\n if len(args) < 2:\n await message.channel.send(\"Please enter a RuneScape username to unlink from your account.\")\n return\n rs_username = \" \".join(args[1:])\n await unlink_rs_username(message, rs_username)\n elif message.content == \"!link\":\n await message.channel.send(\"Please enter a RuneScape username to link with your account.\")\n elif message.content == \"!unlink\":\n await unlink_discord_account(message)\n\n\nasync def link_account(message):\n # Get the user's desired RuneScape username from the message\n args = message.content.split()\n if len(args) < 2:\n await message.channel.send(\"Please enter a RuneScape username to link with your account.\")\n return\n username = \" \".join(args[1:])\n\n # Check if the username is already linked to a Discord account\n linked_user_id = db_conn.execute(\"SELECT discord_id FROM user_links WHERE rs_username = ?\", (username,)).fetchone()\n if linked_user_id:\n # The username is already linked to a Discord account\n linked_user = await message.guild.fetch_member(linked_user_id[0])\n if linked_user:\n await message.channel.send(f\"The RuneScape account {username} is already linked to the Discord account {linked_user.mention}. Please contact an administrator if you believe there is an error.\")\n else:\n await message.channel.send(f\"The RuneScape account {username} is already linked to a Discord account, but that account no longer exists on this server. Please contact an administrator if you believe there is an error.\")\n return\n\n # Check if the username is a valid RuneScape account\n if not is_valid_rs_account(username):\n await message.channel.send(f\"The account {username} is not on the highscores.\")\n return\n\n # Check if the user is already linked to a different RuneScape account\n current_rs_username = db_conn.execute(\"SELECT rs_username FROM user_links WHERE discord_id = ?\", (message.author.id,)).fetchone()\n if current_rs_username:\n await message.channel.send(f\"Your Discord account is already linked to the RuneScape account {current_rs_username[0]}. 
Please use the `!unlink` command to unlink your account first.\")\n return\n\n # Store the link between Discord user ID and RuneScape username in the database\n db_conn.execute(\"INSERT INTO user_links (discord_id, rs_username) VALUES (?, ?)\", (message.author.id, username))\n db_conn.commit()\n\n # Send a confirmation message to the user\n await message.channel.send(f\"Your Discord account has been linked to the RuneScape account {username}.\")\n\nasync def admin_link_account(message):\n # Check if the user is an admin\n if not message.author.guild_permissions.administrator:\n await message.channel.send('You do not have permission to use this command.')\n return\n\n # Parse the Discord user and RuneScape username from the message\n args = message.content.split()\n if len(args) < 3:\n await message.channel.send(\"Please provide a Discord user and a RuneScape username to link.\")\n return\n\n # Find the Discord user object\n discord_user = message.mentions[0]\n if discord_user is None:\n await message.channel.send(\"Please provide a valid Discord user.\")\n return\n\n # Get the RuneScape username\n if args[2].startswith('\"') and args[-1].endswith('\"'):\n username = \" \".join(args[2:])\n username = username[1:-1]\n else:\n username = args[2]\n\n # Check if the username is already linked to a Discord account\n cursor = db_conn.cursor()\n cursor.execute(\"SELECT discord_id FROM user_links WHERE rs_username = ?\", (username,))\n row = cursor.fetchone()\n cursor.close()\n\n if row is not None:\n # The username is already linked to a Discord account\n linked_user = await message.guild.fetch_member(row[0])\n if linked_user:\n await message.channel.send(f\"The Runescape account {username} is already linked to the Discord account {linked_user.mention}. Please contact an administrator if you believe there is an error.\")\n else:\n await message.channel.send(f\"The Runescape account {username} is already linked to a Discord account, but that account no longer exists on this server. 
Please contact an administrator if you believe there is an error.\")\n elif not is_valid_rs_account(username):\n # The username is not a valid Runescape account\n await message.channel.send(f\"The account {username} is not on the highscores.\")\n else:\n # Check if the Discord account is already linked to a RuneScape account\n cursor = db_conn.cursor()\n cursor.execute(\"SELECT rs_username FROM user_links WHERE discord_id = ?\", (discord_user.id,))\n row = cursor.fetchone()\n cursor.close()\n\n if row is not None:\n await message.channel.send(f\"The Discord account {discord_user.mention} is already linked to the RuneScape account {row[0]}.\")\n else:\n # Store the link between Discord user ID and RuneScape username in the database\n cursor = db_conn.cursor()\n cursor.execute(\"INSERT INTO user_links (discord_id, rs_username) VALUES (?, ?)\", (discord_user.id, username))\n db_conn.commit()\n cursor.close()\n\n # Send a confirmation message to the admin and the linked user\n await message.channel.send(f\"The Discord account {discord_user.mention} has been linked to the RuneScape account {username}.\")\n await discord_user.send(f\"Your Discord account has been linked to the RuneScape account {username}.\")\n\nasync def unlink_discord_account(message):\n # Remove the link between the Discord user ID and the RuneScape username\n cursor = db_conn.cursor()\n cursor.execute(\"DELETE FROM user_links WHERE discord_id = ?\", (message.author.id,))\n db_conn.commit()\n cursor.close()\n\n await message.channel.send(\"Your Discord account has been unlinked from your RuneScape account.\")\n\nasync def unlink_rs_username(message, rs_username):\n # Check if the user is an admin\n if not message.author.guild_permissions.administrator:\n await message.channel.send('You do not have permission to use this command.')\n return\n\n cursor = db_conn.cursor()\n cursor.execute(\"DELETE FROM user_links WHERE discord_id = ? AND rs_username = ?\", (message.author.id, rs_username))\n db_conn.commit()\n cursor.close()\n\n await message.channel.send(f\"The Discord account {message.author.mention} has been unlinked from the Runescape account {rs_username}.\")\n\ndef is_valid_rs_account(username):\n try:\n Hiscores(username)\n return True\n except Exception as e:\n if \"Unable to find\" in str(e):\n return False\n else:\n raise e","repo_name":"LucasSzwagiel1092/Discord-Bot","sub_path":"src/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":7650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"18286402824","text":"import machine\nimport time\n\nimport gfx\nfrom ili9341 import Display, color565\nfrom xglcd_font import XglcdFont\n\n# TFT display constants\nTFTWIDTH = 240\nTFTHEIGHT = 320\n\n# SPI constants\nSPI_BAUDRATE = 20000000\nSPI_SCK = 18\nSPI_MOSI = 23\nSPI_MISO = 19\nSPI_CS = 5\nSPI_DC = 16\nSPI_RST = 17\n\n# Color constants\nBLACK = 0x000000\nWHITE = 0xFFFFFF\nRED = 0xFF0000\nGREEN = 0x00FF00\nBLUE = 0x0000FF\n# Initialize SPI bus and TFT display\nspi = machine.SPI(2, baudrate=SPI_BAUDRATE, polarity=0, phase=0, bits=8,\n firstbit=machine.SPI.MSB, sck=machine.Pin(SPI_SCK),\n mosi=machine.Pin(SPI_MOSI), miso=machine.Pin(SPI_MISO))\n\ndisplay = Display(spi, cs=machine.Pin(SPI_CS), dc=machine.Pin(SPI_DC),\n rst=machine.Pin(SPI_RST), width=TFTHEIGHT, height=TFTWIDTH, rotation=90)\n\ngraphics = gfx.GFX(320, 240, display.draw_pixel)\nunispace = XglcdFont('fonts/Unispace12x24.c', 12, 24)\n\n# Function to draw a filled rectangle\ndef fill_rect(x, y, width, height, color):\n display.fill_rectangle(x, y, x + width - 1, y + height - 1, color)\n\n# Function to fill the screen with a color\ndef fill_screen(color):\n fill_rect(0, 0, TFTWIDTH, TFTHEIGHT, color)\n\n# Function to show a message on the screen\ndef message(text, color=0x3E64FF):\n fill_rect(30, 50, 80, 200, color)\n display.draw_text(90, 70, text, unispace, WHITE, color)\n\n# Function to show a message box on the screen and get user input\ndef message_box(text=\"Enter Number\", color=0x3E64FF):\n fill_rect(30, 50, 80, 200, color)\n display.draw_text(80, 70, text, unispace, WHITE, color)\n display.draw_rectangle(75, 105, 170, 40, WHITE)\n display.draw_text(80, 112, input_text, unispace, WHITE, color) # Display the current input text\n time.sleep(0.2) # Delay to avoid capturing multiple key presses\n\nkeypad_rows = [machine.Pin(pin, machine.Pin.IN, machine.Pin.PULL_UP) for pin in (13, 12, 14, 27)]\nkeypad_cols = [machine.Pin(pin, machine.Pin.OUT) for pin in (26, 25, 33, 32)]\n\n# Define the labels for each button on the keypad\nkeypad_labels = [\n \"1\", \"2\", \"3\", \"A\",\n \"4\", \"5\", \"6\", \"B\",\n \"7\", \"8\", \"9\", \"C\",\n \"*\", \"0\", \"#\", \"D\"\n]\n\ninput_text = \"\" # Initialize the input_text variable\n\ndef read_keypad(rows, cols, labels):\n for col in cols:\n col.value(0)\n for i, row in enumerate(rows):\n if row.value() == 0:\n col.value(1)\n return labels[i * 4 + cols.index(col)]\n col.value(1)\n return None\n\ndef key_value():\n global input_text\n key = read_keypad(keypad_rows, keypad_cols, keypad_labels)\n if key is not None and key != \"#\" and len(input_text) < 20: # Limit input_text length to 20 characters\n if key == \"*\":\n input_text = \"\"\n elif key == \"A\":\n input_text = input_text[:-1]\n elif key in keypad_labels:\n input_text += key\n\nif __name__ == \"__main__\":\n # Main loop\n fill_screen(WHITE)\n while True:\n message_box()\n key_value()\n","repo_name":"akuetteh51/VitalCheck_V3","sub_path":"hardwareCode/final/key_padwithDisplay.py","file_name":"key_padwithDisplay.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"36566262673","text":"\nfrom interfata_grafica import *\nfrom prelucrare_fisiere import*\nfrom socket_comunicare import *\nfrom Thread_Prelucrare_ACK import *\nfrom socket_utile_c import *\n\nif __name__ == '__main__':\n # instantiez gui\n r = InterfataGrafica()\n # instantiez thread-urile\n t_trimitere = Thread_Trimitere()\n t_primire = Thread_Primire(r)\n t_prelucrare_ACK=Thread_Prelucrare_ACK()\n t_citire = Thread_Prelucrare()\n # le pornesc\n t_citire.start()\n t_trimitere.start()\n t_primire.start()\n t_prelucrare_ACK.start()\n\n #rulez bucla pentru gui\n r.start_interface()\n\n # dau join la threaduri\n t_citire.join()\n t_trimitere.join()\n t_primire.join()\n t_prelucrare_ACK.join()\n\n\n \n\n\n\n","repo_name":"anamariabagiu-99/AC_RC_P","sub_path":"transmitator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"10229710075","text":"print ('Enter a integer')\r\ng = int(input())\r\ndef line(n):\r\n if(n>g):\r\n return\r\n\r\n for i in range(0,n):\r\n print('*', end='')\r\n print()\r\n n+=1\r\n return line(n)\r\nline(1)","repo_name":"Wlaflamme/Files","sub_path":"final_exam_2.py","file_name":"final_exam_2.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"37048437284","text":"from pyrogram import Client, filters\r\nfrom pyrogram.errors import FloodWait\r\nfrom time import sleep\r\n\r\napp = Client(\"my_account\")\r\ncommand = filters.command(\"t\", prefixes=\".\")\r\n#type\r\n@app.on_message(command & filters.me)\r\ndef type(_, msg):\r\n orig_text = msg.text.split(\".t \", maxsplit=1)[1]\r\n text = orig_text\r\n tbp = \"\" # to be printed\r\n typing_symbol = \"*\"\r\n\r\n while (tbp != orig_text):\r\n try:\r\n msg.edit(tbp + typing_symbol)\r\n sleep(0.05) # 50 ms\r\n\r\n tbp = tbp + text[0]\r\n text = text[1:]\r\n\r\n msg.edit(tbp)\r\n sleep(0.05)\r\n\r\n except FloodWait as e:\r\n sleep(e.x)\r\napp.run()\r\n","repo_name":"alimogh/YobitScript","sub_path":"userbot.py","file_name":"userbot.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"8518028185","text":"def returnToBase(s):\n h = 0\n v = 0\n for ch in s:\n if(ch == \"L\"):\n h -= 1\n elif(ch == \"R\"):\n h += 1\n elif(ch == \"D\"):\n v -= 1\n elif(ch == \"U\"):\n v += 1\n if(not h and not v):\n return True\n else:\n return False\n\nprint(returnToBase(\"RUULLDRD\"))\n","repo_name":"gurjotsc/gcheema-cp","sub_path":"dailybyte/vacuumCleaner3.py","file_name":"vacuumCleaner3.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"2505557992","text":"#Inserting at the Head of Linked List Using Doubly Linked List\nclass Node:\n def __init__(self,data):\n self.data=data\n self.nextNode=None\n self.prevNode=None\nclass LinkedList:\n def __init__(self,value):\n self.head=Node(value)\n self.tail=self.head\n def insertAtHead(self,data):\n newNode = Node(data)\n newNode.nextNode=self.head\n newNode.prevNode=None\n self.head=newNode\n def traverse(self):\n head=self.head\n while head is not None:\n print(head.data)\n head=head.nextNode\nll=LinkedList(12)\nll.insertAtHead(90)\nll.insertAtHead(100)\n\nll.traverse()\n","repo_name":"bhatiakomal/pythonpractice","sub_path":"Exercise/Data_Structure/Linked_List2(15-09-2020/Doubly_Linked_List.py","file_name":"Doubly_Linked_List.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"13915957037","text":"from typing import Optional, Iterable\n\nfrom tqdm import tqdm\nfrom utils import readfile\n\ndef parse_monkeys(data: Iterable[str]):\n monkes = list()\n monke = list()\n test = list()\n for row in data:\n if row.startswith(\"Monkey\") and len(monke) > 0:\n monke.append(test)\n monkes.append(Monke(monke[0], monke[1], monke[2]))\n monke = list()\n test = list()\n elif row.startswith(\"Starting\"):\n l = [int(i) for i in row.split(\":\")[1].strip().split(\", \")]\n monke.append(l)\n elif row.startswith(\"Operation\"):\n monke.append(row.split(\":\")[1].strip())\n elif row.startswith(\"Test\"):\n test.append(row.split(\":\")[1].strip())\n elif len(test) != 0 and row:\n test.append(row)\n elif row.startswith(\"Monkey 0:\"):\n pass\n else:\n assert not row, \"This should only be here if empty not \" + row\n if len(monke) > 1:\n monke.append(test)\n monkes.append(Monke(monke[0], monke[1], monke[2]))\n\n return monkes\nclass Monke:\n def __init__(self, starting_items, op, test):\n self.items = starting_items\n self.op = op\n self.test = test\n self.inspects = 0\n self.div_by_num = int(self.test[0].split(\" \")[-1])\n\n\n def apply_op(self, worry):\n op = self.op.split(\"=\")[1]\n if \"+\" in op:\n to_add = [o.strip() for o in op.split(\"+\")]\n if to_add[0] == \"old\":\n if to_add[1] != \"old\":\n worry += int(to_add[1])\n\n else:\n worry *= 2\n else:\n raise ValueError(op)\n elif \"*\" in op:\n todo = [o.strip() for o in op.split(\"*\")]\n if todo[0] == \"old\":\n if todo[1] == \"old\":\n worry *= worry\n else:\n worry *= int(todo[1])\n return worry\n def inspect_one_item(self):\n item = self.items.pop(0)\n worry = self.apply_op(item)\n worry = worry//3\n self.inspects += 1\n return self.run_test(worry), worry\n\n def inspect_one_item_2(self, mod_to_use):\n item = self.items.pop(0)\n worry = self.apply_op(item)\n worry = worry % mod_to_use\n self.inspects += 1\n return self.run_test(worry), worry\n def run_test(self, worry) -> int:\n if worry % self.div_by_num == 0:\n return int(self.test[1].split(\" \")[-1])\n else:\n return int(self.test[2].split(\" \")[-1])\n\n\nclass MonkeMaster:\n def __init__(self, monkes):\n self.monkes = monkes\n self.rounds = 0\n\n def run_round(self):\n m = 1\n t = [m.div_by_num for m in self.monkes]\n for i in t:\n m*=i\n for monke in self.monkes:\n for _ in list(monke.items):\n give_to, item = monke.inspect_one_item_2(mod_to_use=m)\n self.monkes[give_to].items.append(item)\n assert len(monke.items) == 0\n self.rounds += 1\n def __repr__(self):\n text = f\"Round {self.rounds}:\\n\"\n for (i, monke) in enumerate(self.monkes):\n text += f\"Monkey {i}: {', '.join((str(i) for i in monke.items))}, Inspected {monke.inspects}\\n\"\n return text\ndef partone(monkes):\n num = 10000\n for i in tqdm(range(num), total=num):\n monkes.run_round()\n\n monke_business = sorted([m.inspects for m in monkes.monkes], reverse=True)\n print(monkes)\n print(monke_business[0] * monke_business[1])\n\nif __name__ == \"__main__\":\n real_data = readfile(\"input_real.txt\")\n data = readfile(\"input.txt\")\n monkes = MonkeMaster(parse_monkeys(readfile(\"input_real.txt\")))\n\n partone(monkes) \n","repo_name":"ludde127/AdventOfCode2022","sub_path":"day11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"73607596553","text":"import torch\n\nobs, reward, Cd, Cl, f = torch.load('data/nse_data')\n# obs2, Cd2, Cl2, f2 = torch.load('data/nse_data_sparse')\n# print(obs.shape)\n\n# obs1 = obs[125:132]\n# Cd1 = Cd[125:132]\n# Cl1 = Cl[125:132]\n# f1 = f[125:132]\n\n# print(f1.shape)\n\n# obs = torch.cat((obs2, obs1))\n# print(obs.shape)\n\n# Cd = torch.cat((Cd2, Cd1))\n# Cl = torch.cat((Cl2, Cl1))\n# f = torch.cat((f2, f1))\n\n# print(Cd.shape, Cl.shape, f.shape)\ndata = [obs, Cd, Cl, f]\n\ntorch.save(data, 'data/nse_data')\n\n\nimport torch\nop = 'phase1_ex8_grid_pi'\nstate_dict, logs = torch.load(op)\nlogs.keys()\nlogs['modify'] = True\ntorch.save([state_dict, logs], op)","repo_name":"AsamaKiseto/Cylinder_Env","sub_path":"nse/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"16939976947","text":"# from https://github.com/jik876/hifi-gan\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport logging\n\nfrom torch.nn import Conv1d, ConvTranspose1d\nfrom .modules import UpsampleLayer\nfrom .modules import ResBlock1, ResBlock2, LRELU_SLOPE\nfrom .pqmf import PQMF\n\n\nclass MultiBandHiFiGANGenerator(torch.nn.Module):\n '''\n MultiBand-HiFiGAN model encountered the problem of strong checkerboard artifacts -- the generated audio\n has interference at a specific frequency. I used temporal nearest interpolation layer and tested\n some different upsample rate and kernel sizes. It still didn't completely solve the problem,\n but the parameter provided by this repo has made checkerboard artifacts relatively weak. \n In order to completely solve this problem when generating, a trick is adopted. You can refer to the code.\n '''\n\n def __init__(self,\n resblock_kernel_sizes=[3, 7, 11],\n upsample_rates=[10, 6],\n upsample_initial_channel=256,\n resblock_type=\"1\",\n upsample_kernel_sizes=[20, 12],\n resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],\n transposedconv=True,\n bias=True):\n super(MultiBandHiFiGANGenerator, self).__init__()\n self.num_kernels = len(resblock_kernel_sizes)\n self.num_upsamples = len(upsample_rates)\n self.conv_pre = Conv1d(80, upsample_initial_channel, 7, 1, padding=3, bias=bias)\n resblock = ResBlock1 if resblock_type == '1' else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):\n self.ups.append(\n UpsampleLayer(upsample_initial_channel // (2 ** i),\n upsample_initial_channel // (2 ** (i + 1)),\n upsample_rate=u,\n kernel_size=k,\n stride=1,\n padding=k // 2,\n bias=bias) if transposedconv == False else\n ConvTranspose1d(upsample_initial_channel // (2 ** i),\n upsample_initial_channel // (2 ** (i + 1)),\n k, u,\n padding=(u // 2 + u % 2),\n output_padding=u % 2,\n bias=bias))\n\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = upsample_initial_channel // (2 ** (i + 1))\n for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):\n self.resblocks.append(resblock(ch, k, d, bias=bias))\n\n self.conv_post = Conv1d(ch, 4, 7, 1, padding=3, bias=bias) # 4 band\n\n self.pqmf = PQMF() # 4 band\n # apply weight norm\n self.apply_weight_norm()\n # reset parameters\n self.reset_parameters()\n\n def remove_weight_norm(self):\n \"\"\"Remove weight normalization module from all of the layers.\"\"\"\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)\n\n def apply_weight_norm(self):\n \"\"\"Apply weight normalization module from all of the layers.\"\"\"\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)\n\n def reset_parameters(self):\n \"\"\"Reset parameters.\n This initialization follows official implementation manner.\n https://github.com/descriptinc/melgan-neurips/blob/master/mel2wav/modules.py\n \"\"\"\n def _reset_parameters(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):\n m.weight.data.normal_(0.0, 0.01)\n logging.debug(f\"Reset parameters in {m}.\")\n\n self.apply(_reset_parameters)\n\n def forward(self, 
x):\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n return x\n\n def inference(self, x):\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x, dtype=torch.float).to(next(self.parameters()).device)\n x = x.transpose(1, 0).unsqueeze(0)\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n x = self.pqmf.synthesis(x)\n return x.squeeze()\n","repo_name":"xcmyz/FastVocoder","sub_path":"model/generator/multiband_hifigan.py","file_name":"multiband_hifigan.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"27"}
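A minimal smoke test of the generator above, assuming its default constructor arguments (upsample rates 10*6 = 60 per sub-band, and a 4-band PQMF that multiplies the rate by 4 at synthesis); the expected shapes in the comments are derived from those defaults:

import torch

# Sketch: feed a random 80-bin mel spectrogram through the model and check shapes.
# Assumes the module imports (ResBlock1, PQMF, UpsampleLayer, ...) resolve as in the repo.
model = MultiBandHiFiGANGenerator()
mel = torch.randn(1, 80, 50)            # (batch, mel_bins, frames)
with torch.no_grad():
    bands = model(mel)                  # expected (1, 4, 50 * 60) -- 4 sub-bands
    wav = model.pqmf.synthesis(bands)   # expected (1, 1, 50 * 60 * 4) full-band audio
print(bands.shape, wav.shape)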
+{"seq_id":"73299515913","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import QThread, pyqtSignal, QObject, Qt\nimport requests\nimport re\nimport json\nimport time\nfrom configparser import ConfigParser\n\nfund_file = 'fund.ini'\nconfig_file = 'conf.ini'\n\n\ndef read_cfg(pram):\n cfg = ConfigParser()\n cfg.read(config_file)\n _pram = cfg.get('configuration', pram)\n return _pram\n\n\ndef query_fund(code, name='', gszzl='', gztime=''):\n url = \"http://fundgz.1234567.com.cn/js/%s.js\" % code\n\n # 浏览器头\n headers = {'content-type': 'application/json',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}\n\n try:\n r = requests.get(url, headers=headers)\n # 返回信息\n content = r.text\n\n # 正则表达式\n pattern = r'^jsonpgz\\((.*)\\)'\n\n # 查找结果\n search = re.findall(pattern, content)\n\n name = json.loads(search[0])[\"name\"]\n gszzl = json.loads(search[0])[\"gszzl\"]\n gztime = json.loads(search[0])[\"gztime\"][-5:]\n\n return name, gszzl, gztime\n except:\n return name, gszzl, gztime\n\n\ndef fund_count():\n with open(fund_file, 'r') as f:\n funds = f.readlines()\n return len(funds)\n\n\ndef fund_data():\n _data = []\n with open(fund_file, 'r') as f:\n funds = f.readlines()\n for i in range(len(funds)):\n data = query_fund(funds[i].strip())\n _data.append(data)\n return _data\n\n\nclass BackendThread(QObject):\n # 通过类成员对象定义信号\n update_data = pyqtSignal(list)\n\n # 处理业务逻辑\n def flush_data(self):\n while True:\n _fund_data = fund_data()\n self.update_data.emit(_fund_data)\n time.sleep(int(read_cfg(\"flush_time\")))\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.resize(185, 160)\n self.setWindowTitle(\"Fund\")\n self.setWindowOpacity(float(read_cfg(\"WindowOpacity\"))) # 透明度\n # self.setWindowFlags(Qt.FramelessWindowHint) # 无边框\n self.setWindowFlags(Qt.WindowMinimizeButtonHint | # 使能最小化按钮\n Qt.WindowCloseButtonHint | # 使能关闭按钮\n Qt.WindowStaysOnTopHint) # 窗体总在最前端\n # self.setFixedSize(self.width(), self.height()) # 固定窗体大小\n self.create_table()\n self.setup_centralWidget()\n self.initUI()\n\n def create_table(self):\n self.table = QTableWidget()\n self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n HorizontalHeaderLabels = [\"基金名称\", \"涨幅(%)\", \"时间\"]\n columns = len(HorizontalHeaderLabels)\n self.table.setColumnCount(columns)\n self.rows = fund_count()\n self.table.setRowCount(self.rows) #\n self.headerWidth = (60, 50, 50)\n self.table.setSortingEnabled(True)\n self.table.horizontalHeader().setStyleSheet(\"QHeaderView::section{background-color:rgb(180,180,250);}\")\n for i in range(columns):\n self.table.setColumnWidth(i, self.headerWidth[i])\n self.table.setHorizontalHeaderLabels(HorizontalHeaderLabels)\n\n def update_table(self, _fund_data):\n for i in range(self.rows):\n item = QTableWidgetItem(_fund_data[i][0][0:4])\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.table.setItem(i, 0, item)\n item = QTableWidgetItem(_fund_data[i][1])\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.table.setItem(i, 1, item)\n item = QTableWidgetItem(_fund_data[i][2])\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.table.setItem(i, 2, item)\n\n def initUI(self):\n # 创建线程\n self.backend = BackendThread()\n # 连接信号\n self.backend.update_data.connect(self.update_table)\n self.thread = QThread()\n self.backend.moveToThread(self.thread)\n # 开始线程\n self.thread.started.connect(self.backend.flush_data)\n 
self.thread.start()\n\n def setup_centralWidget(self):\n # set the main window's central widget\n self.tabWidget = QTabWidget()\n self.tabWidget.addTab(self.table, \"\")\n self.setCentralWidget(self.tabWidget) # assign the central widget of the main window\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mw = MainWindow()\n mw.show()\n sys.exit(app.exec_())\n","repo_name":"dayerong/myfund","sub_path":"fund.py","file_name":"fund.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"}
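read_cfg above expects conf.ini to have a [configuration] section; the keys actually read in this file are flush_time and WindowOpacity, and fund.ini is read as one fund code per line. A sketch that writes a matching conf.ini (the values are placeholders):

from configparser import ConfigParser

cfg = ConfigParser()
cfg["configuration"] = {
    "flush_time": "60",      # seconds between refreshes in BackendThread.flush_data
    "WindowOpacity": "0.9",  # 0.0 fully transparent .. 1.0 fully opaque
}
with open("conf.ini", "w") as f:
    cfg.write(f)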
+{"seq_id":"19009025498","text":"from pymongo import MongoClient\nimport json\n\nif __name__ == '__main__':\n mongo_cl = MongoClient('localhost', port=27017)\n db = mongo_cl['project2']\n coll = db.credentials\n\n with open('data/credentials.json', 'rb') as f:\n from_disk = json.load(f)\n\n coll.remove({}) #clear all\n coll.insert(from_disk)","repo_name":"georgeberry/aws_crawler","sub_path":"aws_server/insert_credentials.py","file_name":"insert_credentials.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"70102609031","text":"#! /usr/local/bin/python3\n# -*- coding:utf8 -*-\n\n\"\"\"\n function: 存储大量让游戏运行的函数\n\"\"\"\n\n\nimport sys\nimport pygame\n\n\ndef check_keydown_events(event, ship): \n \"\"\"\n 响应按键 \n \"\"\" \n if event.key == pygame.K_RIGHT:\n # ship.rect.centerx += 1\n ship.moving_right = True \n elif event.key == pygame.K_LEFT: \n ship.moving_left = True \n\n\ndef check_keyup_events(event, ship): \n \"\"\" \n 响应松开\n \"\"\" \n if event.type == pygame.K_RIGHT: \n ship.moving_right = False\n elif event.type == pygame.K_LEFT: \n ship.moving_left = False\n\n\ndef check_event(ship):\n \"\"\"\n 响应按键和鼠标事件\n :param ship: 飞船实例对象\n :return:\n \"\"\"\n for event in pygame.event.get():\n # print(event)\n # print(event.type)\n # print(pygame.QUIT)\n # 当玩家单机游戏窗口的关闭按钮时, 将检测到 pygame.QUIT 事件\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n # 向左移动飞船\n # check_keydown_events(event, ship)\n if event.key == pygame.K_RIGHT:\n # ship.rect.centerx += 1\n ship.moving_right = True \n elif event.key == pygame.K_LEFT: \n ship.moving_left = True \n \n # elif event.type == pygame.KEYUP:\n # # check_keyup_events(event, ship)\n # if event.type == pygame.K_RIGHT:\n # ship.moving_right = False\n # elif event.type == pygame.K_LEFT:\n # ship.moving_left = False\n\n\ndef update_screen(ai_settings, screen, ship):\n \"\"\"\n 更新屏幕上的图像, 并切换到新屏幕\n :param ai_settings: 初始化设置实例对象\n :param screen: 元素, 屏幕对象\n :param ship: 飞船实例对象\n :return:\n \"\"\"\n screen.fill(ai_settings.bg_color)\n ship.blitme()\n\n # 让最近绘制的屏幕可见, 不断更新屏幕, 以显示元素的新位置, 并在原来的位置隐藏元素, 从而营造平滑移动的效果\n pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"Julian-zly/python_code","sub_path":"flag/aline_invasion/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"29914654023","text":"import frappe\nimport json\nfrom frappe.utils import nowtime\n\nfrom frappe.exceptions import DoesNotExistError\nfrom frappe.utils import now_datetime\n\ndef cached_boot_info(bootinfo):\n\tglobal_config = json.loads(frappe.get_doc(\"Field App Config\", '').json_config or '{}')\n\n\tbootinfo.field_app = frappe._dict({\n\t\t'config': global_config,\n\t\t'time': nowtime()\n\t})\n\n\tuser_config = frappe.db.get_values(\"User Config\", {'user': bootinfo.user.name}, fieldname='json_config')\n\tif user_config:\n\t\tbootinfo.field_app.config.update(json.loads(user_config[0][0]))\n\ndef non_cached_boot_info(bootinfo):\n\tapp = frappe.get_request_header('App-Version')\n\tif app:\n\t\tdevice = json.loads(frappe.get_request_header('User-Id'))\n\t\tdevice_obj = frappe.get_doc(\"Field App Device\", device['serial'])\n\t\tbootinfo.field_app.config.update(json.loads(device_obj.json_config))\n\ndef on_session_creation(login_manager):\n\tapp = frappe.get_request_header('App-Version')\n\tif app:\n\t\tdevice = json.loads(frappe.get_request_header('User-Id'))\n\n\t\tdevice = {\n\t\t\t'serial': device['serial'],\n\t\t\t'platform': device['platform'],\n\t\t\t'platform_version': device['version'],\n\t\t\t'uuid': device['uuid'],\n\t\t\t'last_active_on': now_datetime(),\n\t\t\t'user': login_manager.user,\n\t\t\t'doctype': 'Field App Device',\n\t\t\t'last_active_user': login_manager.user,\n\t\t\t'app_version': frappe.get_doc(\"Field App Version\", app).name,\n\t\t\t'device_meta': frappe.get_request_header('User-Id')\n\t\t}\n\n\t\tdevice_obj = None\n\t\ttry:\n\t\t\tdevice_obj = frappe.get_doc(\"Field App Device\", device['serial'])\n\t\t\tdevice_obj.update(device)\n\t\texcept DoesNotExistError:\n\t\t\tdevice_obj = frappe.get_doc(device)\n\n\t\tdevice_obj.ignore_permissions = True\n\t\tdevice_obj.save()\n\n\t\tfrappe.db.commit()\n\n\t\tif not device_obj.enabled == '1':\n\t\t\tfrappe.throw(\"Device not approved!\")\n","repo_name":"BhupeshGupta/field_report","sub_path":"field_report/field_report/startup_boot_info.py","file_name":"startup_boot_info.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"34808108445","text":"import collections\na = [1,1,2,3,3,2,5]\nf = {}\n# frequency = collections.Counter(a)\n# print(dict(frequency))\nfor item in a:\n if item in f:\n f[item]+=1\n else:\n f[item]=1\nfor key,value in f.items():\n if value == 1:\n print(key)\n","repo_name":"ManasDixit190164/Python-Projects","sub_path":"Find unique element in list.py","file_name":"Find unique element in list.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"9346141995","text":"num1 = int(input(\"Digite um número: \"))\nnum2 = int(input(\"Digite outro número: \"))\n\nif num1 > num2:\n menor = num2\n maior = num1\nelif num2 > num1:\n menor = num1\n maior = num2\nelse:\n menor = num1\n maior = num1\n\ntotal = 0\nwhile menor <= maior:\n total += menor\n menor += 1\n\nprint(\"O valor final é {0}\".format(total))\n","repo_name":"vladprado/AD141","sub_path":"mywork/Chapter 2/ch02ex04.py","file_name":"ch02ex04.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"37800187194","text":"from ..panels import ABTab\n\n\nclass PluginTab(ABTab):\n \"\"\"Parent class of every plugin tab\"\"\"\n def __init__(self, plugin, panel, parent=None):\n \"\"\"\n :param plugin: instance of the plugin class\n :param panel: \"left\" or \"right\"\n \"\"\"\n super(PluginTab, self).__init__(parent)\n self.panel = panel\n self.plugin = plugin\n self.isPlugin = True\n self.setTabsClosable(True)\n","repo_name":"LCA-ActivityBrowser/activity-browser","sub_path":"activity_browser/layouts/tabs/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"27"}
+{"seq_id":"3690871607","text":"import csv\nimport os\n\n# Splite result of sentiment140 into three classification files\n\ndef rwData():\n\tfor filename in os.listdir(\"./sentiment140_output\"):\n\t\tif filename.endswith(\".csv\"): \n\t\t\tinputfilename = \"./sentiment140_output/\" + filename\n\t\t\twith open(inputfilename, 'rb') as inputfile, open(\"./sentiment_classification/positive.csv\", \"a\") as positive, open(\"./sentiment_classification/negative.csv\", \"a\") as negative, open(\"./sentiment_classification/neutral.csv\", \"a\") as neutral:\n\t\t\t\tfor line in inputfile:\n\t\t\t\t\tsplit = line.split(\",\", 1)\n\t\t\t\t\tclassification = split[0].replace(\"\\\"\", \"\")\n\t\t\t\t\ttext = split[1].replace(\"\\\"\", \"\")\n\t\t\t\t\treFormat = classification + \";\" + text\n\n\t\t\t\t\tif int(classification) == 0:\n\t\t\t\t\t\tnegative.write(reFormat)\n\t\t\t\t\telif int(classification) == 4:\n\t\t\t\t\t\tpositive.write(reFormat)\n\t\t\t\t\telif int(classification) == 2:\n\t\t\t\t\t\tneutral.write(reFormat)\n\ndef main():\n\trwData()\nmain()","repo_name":"Nirvash0912/EECS-510-Project-Code","sub_path":"sentiment/classify_output.py","file_name":"classify_output.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"31526709154","text":"from django import forms\n\nfrom .models import (\n Estudio,\n Collector,\n Estudio,\n Airports,\n BlackList,\n Messages,\n Template,\n Alerts\n)\n\n\nclass EstudioForm(forms.ModelForm):\n class Meta:\n models = Estudio\n fields = [\n \"name\",\n \"token\",\n \"survey\",\n \"message_remainder\",\n \"message_sentHour\",\n \"default_collector\",\n \"get_response_startOf\",\n \"get_response_endOf\",\n \"get_response_type\",\n \"comprehend_atribute\",\n \"comprehend_questions\",\n \"comprehend_textByResponse\",\n \"email_Source\"\n ]\n \n def __init__(self, *args, **kwargs):\n super(EstudioForm, self).__init__(*args, **kwargs) \n\nclass CollectorForm(forms.ModelForm):\n class Meta:\n models: Collector\n fields = [\n \"status\",\n \"redirect_url\",\n \"disqualification_url\",\n \"response_count\",\n \"closed_page_message\",\n \"href\",\n \"close_date\",\n \"display_survey_results\",\n \"open\",\n \"disqualification_type\",\n \"allow_multiple_responses\",\n \"anonymous_type\",\n \"name\",\n \"survey_id\",\n \"password_enabled\",\n \"date_modified\",\n \"url\",\n \"edit_response_type\",\n \"redirect_type\",\n \"sender_email\",\n \"thank_you_message\",\n \"date_created\",\n \"disqualification_message\",\n \"type\",\n \"response_limit\"\n ]\n \n def __init__(self, *args, **kwargs):\n super(CollectorForm, self).__init__(*args, **kwargs)\n\nclass AirportsForm(forms.ModelForm):\n class Meta:\n models = Airports\n fields = [\n 'iata',\n 'estudio'\n ]\n\n def __init__(self, *args, **kwargs):\n super(AirportsForm, self).__init__(*args, **kwargs)\n","repo_name":"LuisEnVilla/django-101","sub_path":"my_web/my_web/estudios/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"1798970496","text":"import pandas as pd\nimport numpy as np\nimport joblib\nimport os,sys\nimport benchmark_common as bcommon\nimport config as cfg\nimport argparse\nimport tools.funclib as funclib\nfrom tools.Attention import Attention\nfrom keras.models import load_model\nimport tools.embedding_esm as esmebd\nimport time\nfrom pandarallel import pandarallel # import pandaralle\n\n\n#region Integrate output\ndef integrate_out_put(existing_table, blast_table, dmlf_pred_table, mode='p', topnum=1):\n \"\"\"[Integrate output]\n\n Args:\n existing_table ([DataFrame]): [db search results table]\n blast_table ([DataFrame]): [sequence alignment results table]\n isEnzyme_pred_table (DataFrame): [isEnzyme prediction results table]\n how_many_table ([DataFrame]): [function counts prediction results table]\n ec_table ([DataFrame]): [ec prediction table]\n\n Returns:\n [DataFrame]: [final results]\n \"\"\"\n existing_table['res_type'] = 'db_match'\n blast_table['res_type']='blast_match'\n results_df = ec_table.merge(blast_table, on='id', how='left')\n\n function_df = how_many_table.copy()\n function_df = function_df.merge(isEnzyme_pred_table, on='id', how='left')\n function_df = function_df.merge(blast_table[['id', 'ec_number']], on='id', how='left')\n function_df['pred_function_counts']=function_df.parallel_apply(lambda x :integrate_enzyme_functioncounts(x.ec_number, x.isEnzyme_pred, x.pred_s, x.pred_m), axis=1)\n results_df = results_df.merge(function_df[['id','pred_function_counts']],on='id',how='left')\n\n results_df.loc[results_df[results_df.res_type.isnull()].index,'res_type']='dmlf_pred'\n results_df['pred_ec']=results_df.parallel_apply(lambda x: gather_ec_by_fc(x.iloc[3:23],x.ec_number, x.pred_function_counts), axis=1)\n results_df = results_df.iloc[:,np.r_[0,23,1,2,32,27:31]].rename(columns={'seq_x':'seq','seqlength_x':'seqlength'})\n\n\n if mode=='p':\n existing_table['pred_ec']=''\n result_set = pd.concat([existing_table, results_df], axis=0)\n result_set = result_set.drop_duplicates(subset=['id'], keep='first').sort_values(by='res_type')\n result_set['ec_number'] = result_set.apply(lambda x: x.pred_ec if str(x.ec_number)=='nan' else x.ec_number, axis=1)\n result_set.reset_index(drop=True, inplace=True)\n result_set = result_set.iloc[:,0:9]\n \n result_set['seqlength'] = result_set.seq.apply(lambda x: len(x))\n result_set['ec_number'] = result_set.ec_number.apply(lambda x: 'Non-Enzyme' if len(x)==1 else x)\n result_set = result_set.rename(columns={'ec_number':'ecrecer_pred_ec_number'})\n \n result_set = result_set[['id','ecrecer_pred_ec_number','seq','seqlength']]\n \n if mode =='r':\n result_set= results_df.merge(ec_table, on=['id'], how='left')\n result_set=result_set.iloc[:,np.r_[0:3,30,5:9, 4,10:30]]\n result_set = result_set.rename(columns=dict({'seq_x': 'seq','pred_ec': 'top0','top0_y': 'top1' }, **{'top'+str(i) : 'top'+str(i+1) for i in range(0, 20)}))\n# result_set = result_set.iloc[:,0:(8+topnum)]\n# result_set.loc[result_set[result_set.id.isin(existing_table.id)].index.values,'res_type']= 'db_match'\n \n result_set = result_set.iloc[:,np.r_[0, 2:4,8:(8+topnum)]]\n \n return result_set\n\n#endregion\n\n#region Predict Function Counts\ndef predict_function_counts(test_data):\n \"\"\"[Predict Function Counts]\n\n Args:\n test_data ([DataFrame]): [DF contain protein ID and Seq]\n\n Returns:\n [DataFrame]: [col1:id, col2: single or multi; col3: multi counts]\n \"\"\"\n res=pd.DataFrame()\n res['id']=test_data.id\n model_s = joblib.load(cfg.MODELDIR+'/single_multi.model')\n 
model_m = joblib.load(cfg.MODELDIR+'/multi_many.model')\n pred_s=model_s.predict(np.array(test_data.iloc[:,1:]))\n pred_m=model_m.predict(np.array(test_data.iloc[:,1:]))\n res['pred_s']=1-pred_s\n res['pred_m']=pred_m+2\n\n return res\n#endregion\n\n#region Integrate function counts by blast, single and multi\ndef integrate_enzyme_functioncounts(blast, isEnzyme, single, multi):\n \"\"\"[Integrate function counts by blast, single and multi]\n\n Args:\n blast ([type]): [blast results]\n s ([type]): [single prediction]\n m ([type]): [multi prediction]\n\n Returns:\n [type]: [description]\n \"\"\"\n if str(blast)!='nan':\n if str(blast)=='-':\n return 0\n else:\n return len(blast.split(','))\n if isEnzyme == 0:\n return 0\n if single ==1:\n return 1\n return multi\n#endregion\n\n#region format finnal ec by function counts\ndef gather_ec_by_fc(toplist, ec_blast ,counts):\n \"\"\"[format finnal ec by function counts]\n\n Args:\n toplist ([list]): [top 20 predicted EC]\n ec_blast ([string]): [blast results]\n counts ([int]): [function counts]\n\n Returns:\n [string]: [comma sepreated ec string]\n \"\"\"\n if counts==0:\n return '-'\n elif str(ec_blast)!='nan':\n return str(ec_blast)\n else:\n return ','.join(toplist[0:counts])\n#endregion\n\n\n\n\n#region run\ndef step_by_step_run(input_fasta, output_tsv, mode='p', topnum=1):\n \"\"\"[run]\n Args:\n input_fasta ([string]): [input fasta file]\n output_tsv ([string]): [output tsv file]\n \"\"\"\n start = time.process_time()\n if mode =='p':\n print('run in annoation mode')\n if mode =='r':\n print('run in recommendation mode')\n if mode =='h':\n print('run in hybrid mode')\n\n # 1. 读入数据\n print('step 1: loading data')\n input_df = funclib.load_fasta_to_table(input_fasta) # test fasta\n latest_sprot = pd.read_feather(cfg.FILE_LATEST_SPROT_FEATHER) #sprot db\n\n \n # 2. 查找数据\n print('step 2: find existing data')\n find_data = input_df.merge(latest_sprot, on='seq', how='left')\n find_data = latest_sprot[latest_sprot.seq.isin(input_df.seq)]\n find_data = find_data.drop_duplicates(subset='seq').reset_index(drop=True)\n exist_data = find_data.merge(input_df, on='seq', how='left').iloc[:,np.r_[8,0,1:8]].rename(columns={'id_x':'uniprot_id','id_y':'input_id'}).reset_index(drop=True)\n noExist_data = input_df[~input_df.seq.isin(find_data.seq)]\n\n if len(noExist_data) == 0 and mode=='p':\n exist_data=exist_data[['input_id','ec_number']].rename(columns={'ec_number':'ec_pred'})\n exist_data.to_csv(output_tsv, sep='\\t', index=False)\n end = time.process_time()\n print('All done running time: %s Seconds'%(end-start))\n return\n \n \n # 3. EMBedding\n print('step 3: Embedding')\n featurebank_esm32 = pd.read_feather(cfg.FILE_FEATURE_ESM32)\n\n existing_feature = featurebank_esm32[featurebank_esm32.id.isin(exist_data.uniprot_id)]\n existing_feature = exist_data[['input_id','uniprot_id']].merge(existing_feature.rename(columns={'id':'uniprot_id'}), on='uniprot_id', how='left').rename(columns={'input_id':'id'}).iloc[:,np.r_[0,2:existing_feature.shape[1]+1]]\n\n rep0, rep32, rep33 = esmebd.get_rep_multi_sequence(sequences=noExist_data, model='esm1b_t33_650M_UR50S',seqthres=1022)\n\n rep32 = pd.concat([existing_feature,rep32],axis=0).reset_index(drop=True)\n\n print('step 4: run prediction')\n\n if mode=='p':\n\n # 5. 
isEnzyme Prediction\n print('step 5: predict isEnzyme')\n pred_dmlf = pd.DataFrame(rep32.id.copy())\n model_isEnzyme = load_model(cfg.ISENZYME_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_isEnzyme.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t1=joblib.load(cfg.DICT_LABEL_T1)\n \n pred_dmlf['dmlf_isEnzyme']=(encoder_t1.inverse_transform(bcommon.props_to_onehot(predicted))).reshape(1,-1)[0]\n\n\n # 6. How many Prediction\n print('step 6: predict function counts')\n model_howmany = load_model(cfg.HOWMANY_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_howmany.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t2=joblib.load(cfg.DICT_LABEL_T2)\n pred_dmlf['dmlf_howmany']=(encoder_t2.inverse_transform(bcommon.props_to_onehot(predicted))).reshape(1,-1)[0]\n\n\n # 7. EC Prediction\n print('step 7: predict EC')\n model_ec = load_model(cfg.EC_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_ec.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t3=joblib.load(cfg.DICT_LABEL_T3)\n pred_dmlf['dmlf_ec']=[','.join(item) for item in (encoder_t3.inverse_transform(bcommon.props_to_onehot(predicted)))]\n\n\n print('step 8: integrate results')\n results = pred_dmlf.merge(exist_data, left_on='id',right_on='input_id', how='left') \n results=results.fillna('#')\n results['ec_pred'] =results.apply(lambda x : x.ec_number if x.ec_number!='#' else ('-' if x.dmlf_isEnzyme==False else x.dmlf_ec) ,axis=1)\n output_df = results[['id', 'ec_pred']].rename(columns={'id':'id_input'})\n\n elif mode =='r':\n # print('step 4: recommendation')\n # label_model_ec = pd.read_feather(f'{cfg.MODELDIR}/task3_labels.feather').label_multi.to_list()\n # model_ec = load_model(f'{cfg.MODELDIR}/task3_esm32_2022.h5',custom_objects={\"Attention\": Attention}, compile=False)\n # predicted = model_ec.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n # output_df=pd.DataFrame()\n # output_df['id']=input_df['id'].copy()\n # output_df['ec_recomendations']=pd.DataFrame(predicted).apply(lambda x :sorted(dict(zip((label_model_ec), x)).items(),key = lambda x:x[1], reverse = True)[0:topnum], axis=1 ).values\n\n print('step 4: predict EC')\n pred_dmlf = pd.DataFrame(rep32.id.copy())\n model_ec = load_model(cfg.EC_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_ec.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t3=joblib.load(cfg.DICT_LABEL_T3)\n pred_dmlf['dmlf_ec']=[','.join(item) for item in (encoder_t3.inverse_transform(bcommon.props_to_onehot(predicted)))]\n pred_dmlf['dmlf_recomendations']=pd.DataFrame(predicted).apply(lambda x :sorted(dict(zip((encoder_t3.classes_), x)).items(),key = lambda x:x[1], reverse = True)[0:topnum], axis=1 ).values\n output_df = pred_dmlf[['id', 'dmlf_recomendations']].rename(columns={'id':'id_input'})\n \n elif mode =='h':\n print('running in hybird mode')\n\n # 4. 
sequence alignment\n print('step 4: sequence alignment')\n if not os.path.exists(cfg.FILE_BLAST_PRODUCTION_DB):\n funclib.table2fasta(latest_sprot, cfg.FILE_BLAST_PRODUCTION_FASTA)\n cmd = r'diamond makedb --in {0} -d {1}'.format(cfg.FILE_BLAST_PRODUCTION_FASTA, cfg.FILE_BLAST_PRODUCTION_DB)\n os.system(cmd)\n blast_res = funclib.getblast_usedb(db=cfg.FILE_BLAST_PRODUCTION_DB, test=input_df)\n blast_res =blast_res[['id', 'sseqid']].merge(latest_sprot, left_on='sseqid', right_on='id', how='left').iloc[:,np.r_[0,2,3:10]].rename(columns={'id_x':'input_id','id_y':'uniprot_id'}).reset_index(drop=True)\n\n # 5. isEnzyme Prediction\n print('step 5: predict isEnzyme')\n pred_dmlf = pd.DataFrame(rep32.id.copy())\n model_isEnzyme = load_model(cfg.ISENZYME_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_isEnzyme.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t1=joblib.load(cfg.DICT_LABEL_T1)\n pred_dmlf['dmlf_isEnzyme']=(encoder_t1.inverse_transform(bcommon.props_to_onehot(predicted))).reshape(1,-1)[0]\n\n\n # 6. How many Prediction\n print('step 6: predict function counts')\n model_howmany = load_model(cfg.HOWMANY_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_howmany.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t2=joblib.load(cfg.DICT_LABEL_T2)\n pred_dmlf['dmlf_functions']=(encoder_t2.inverse_transform(bcommon.props_to_onehot(predicted))).reshape(1,-1)[0]\n\n\n # 7. EC Prediction\n print('step 7: predict EC')\n model_ec = load_model(cfg.EC_MODEL,custom_objects={\"Attention\": Attention}, compile=False)\n predicted = model_ec.predict(np.array(rep32.iloc[:,1:]).reshape(rep32.shape[0],1,-1))\n encoder_t3=joblib.load(cfg.DICT_LABEL_T3)\n pred_dmlf['dmlf_ec']=[','.join(item) for item in (encoder_t3.inverse_transform(bcommon.props_to_onehot(predicted)))]\n pred_dmlf['dmlf_recomendations']=pd.DataFrame(predicted).apply(lambda x :sorted(dict(zip((encoder_t3.classes_), x)).items(),key = lambda x:x[1], reverse = True)[0:topnum], axis=1 ).values\n\n pred_dmlf = pred_dmlf.merge(blast_res[['input_id','ec_number']].rename(columns={'ec_number':'blast_ec'}), left_on='id', right_on='input_id', how='left')\n # pred_dmlf['dmlf_recomendations']=pred_dmlf.apply(lambda x: x.dmlf_recomendations if x.dmlf_isEnzyme else '-', axis=1 )\n pred_dmlf['dmlf_ec']=pred_dmlf.apply(lambda x: x.dmlf_ec if x.dmlf_isEnzyme else '-', axis=1 )\n pred_dmlf = pred_dmlf.merge(exist_data[['input_id','ec_number']].rename(columns={'ec_number':'db_ec'}), on='input_id', how='left')\n pred_dmlf['dmlf_ec']=pred_dmlf.apply(lambda x: x.db_ec if str(x.db_ec)!='nan' else x.dmlf_ec,axis=1)\n pred_dmlf['dmlf_isEnzyme']=pred_dmlf.apply(lambda x: True if (str(x.db_ec)!='nan' and x.db_ec!='-') else x.dmlf_isEnzyme,axis=1)\n pred_dmlf['dmlf_functions']=pred_dmlf.apply(lambda x: len(x.db_ec.split(',')) if str(x.db_ec)!='nan' else x.dmlf_functions,axis=1)\n\n\n output_df = pred_dmlf[['id', 'dmlf_isEnzyme', 'dmlf_functions', 'dmlf_ec', 'dmlf_recomendations', 'blast_ec' ]].rename(columns={'id':'input_id'})\n\n else:\n print(f'mode:{mode} not found')\n sys.exit()\n\n\n print('step 9: writting results') \n\n output_df.to_csv(output_tsv, sep='\\t', index=False)\n\n print(output_df)\n \n end = time.process_time()\n print('All done running time: %s Seconds'%(end-start))\n#endregion\n\n\nif __name__ =='__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', help='input file (fasta format)', type=str, 
default=cfg.DATADIR + 'sample_10.fasta')\n parser.add_argument('-o', help='output file (tsv table)', type=str, default=cfg.RESULTSDIR + 'sample_10_2023_07_18.tsv')\n parser.add_argument('-mode', help='compute mode. p: prediction, r: recommendation', type=str, default='r')\n parser.add_argument('-topk', help='recommendation records, min=1, max=20', type=int, default='50')\n\n pandarallel.initialize() #init\n args = parser.parse_args()\n input_file = args.i\n output_file = args.o\n compute_mode = args.mode\n topk = args.topk\n \n step_by_step_run( input_fasta=input_file, \n output_tsv=output_file, \n mode=compute_mode, \n topnum=topk\n )\n ","repo_name":"kingstdio/ECRECer","sub_path":"production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":15053,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"27"}
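Besides the argparse entry point above, the pipeline can be driven directly from Python; a minimal sketch (the file paths are placeholders):

from pandarallel import pandarallel

import production

pandarallel.initialize()  # the pipeline uses pandas parallel_apply, so initialize as __main__ does
production.step_by_step_run(
    input_fasta="data/sample_10.fasta",    # placeholder path
    output_tsv="results/predictions.tsv",  # placeholder path
    mode="p",                              # 'p' annotation, 'r' recommendation, 'h' hybrid
    topnum=5,                              # number of EC recommendations in 'r'/'h' mode
)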
+{"seq_id":"8074821101","text":"import glob\nimport json\nimport logging\nimport os\n\nfrom django.core.management.base import BaseCommand\n\nfrom backend.db_monitor.constants import TPLS_ALARM_DIR, TPLS_COLLECT_DIR, TargetPriority\n\nlogger = logging.getLogger(\"root\")\n\n\nclass Command(BaseCommand):\n help = \"策略模板文件修复\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"-t\", \"--type\", choices=[\"collect\", \"alarm\", \"all\"], default=\"all\", help=\"模板类型\")\n parser.add_argument(\"version\", type=int, help=\"版本号\")\n\n def update_json_file(self, f, template_dict):\n # print(f\"update json file: {f.name}\")\n f.seek(0)\n f.write(json.dumps(template_dict, indent=2))\n f.truncate()\n\n def update_version(self, template_dir, version):\n for json_file in glob.glob(os.path.join(template_dir, \"*.json\")):\n with open(json_file, \"r+\") as f:\n template_dict = json.loads(f.read())\n template_dict[\"version\"] = version\n self.update_json_file(f, template_dict)\n\n def handle(self, *args, **options):\n template_type = options[\"type\"]\n version = options[\"version\"]\n\n print(f\"update {template_type} -> version = {version}...\")\n\n if template_type in [\"all\", \"alarm\"]:\n self.update_version(TPLS_ALARM_DIR, version)\n\n if template_type in [\"all\", \"collect\"]:\n self.update_version(TPLS_COLLECT_DIR, version)\n","repo_name":"TencentBlueKing/blueking-dbm","sub_path":"dbm-ui/backend/db_monitor/management/commands/update_version.py","file_name":"update_version.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"27"}
+{"seq_id":"18041809013","text":"'''JEITO QUE EU FIZ\r\ngender = ['M', 'F']\r\nsex = ''\r\nwhile sex not in gender:\r\n sex = str(input('Enter the gender:[M/F]')).upper().strip()[0]\r\n if sex not in gender:\r\n print('INVALID OPTION, TRY AGAIN:', end='')\r\nprint('Sex \"{}\" Computed!'.format(sex))'''\r\n\r\nsex = str(input('Enter the Gender[M/F]:')).upper().strip()[0]\r\nwhile sex not in 'MF':\r\n sex = str(input('INVALID OPTION, TRY AGAIN: Enter the Gender[M/F]:')).upper().strip()[0]\r\nprint('Sex \"{}\" Computed'.format(sex))\r\n","repo_name":"franckallyson/Python-cursoemvideo","sub_path":"ex057.py","file_name":"ex057.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"22081468096","text":"\nimport torch\n\n\nclass TwoNN(torch.nn.Module):\n # linear model\n def __init__(self, input_dim, hidden_outdim, output_dim):\n super(TwoNN, self).__init__()\n self.fc1 = torch.nn.Linear(input_dim, hidden_outdim)\n self.relu = torch.nn.ReLU(inplace=True)\n self.fc2 = torch.nn.Linear(hidden_outdim, output_dim)\n\n def forward(self, x):\n out = self.fc1(x)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n ","repo_name":"privacytrustlab/bias_in_FL","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"}
+{"seq_id":"239590767","text":"\nfrom importlib import import_module\n\nimport horovod.torch as hvd\nfrom torch import optim\n\n\ndef get_optimizer(model, lr_scheduler, weight_decay, distributed=False):\n lr_type = lr_scheduler['type']\n lr_scheduler = getattr(import_module(\n 'optim.' + lr_type), lr_type.capitalize().replace('_lr', 'LR'))(lr_scheduler['params'])\n\n optimizer = optim.SGD(\n filter(lambda p: p.requires_grad, model.parameters()),\n lr=lr_scheduler.initial_lr,\n momentum=0.9,\n nesterov=True,\n weight_decay=weight_decay)\n if distributed:\n optimizer = hvd.DistributedOptimizer(optimizer,\n named_parameters=filter(lambda p: p[1].requires_grad,\n model.named_parameters()))\n lr_scheduler.set_scheduler(optimizer)\n\n return optimizer, lr_scheduler\n","repo_name":"patykov/OnlineAction","sub_path":"optim/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"35007587990","text":"import numpy as np\nfrom scipy.special import gamma\n\n\ndef incidence_angle(terrain, look_angle=30):\n \"\"\"computes the radar incidence angle given its look angle and the terrain\n\n assumes the look angle is constant for the whole terrain\n \"\"\"\n\n if terrain.ndim != 2:\n raise ValueError('terrain must be 2 dimensional')\n grad = np.gradient(terrain)[1]\n incidence_angle = np.arctan(-grad)+np.radians(look_angle)\n if np.any(incidence_angle < 0):\n raise ValueError('negative incidence angle would result in shadowing')\n return incidence_angle\n\n\ndef radar_cross_section(theta, k=120, H=0.3, T=100):\n \"\"\"compute the radar cross section\n\n A simplified version of Equation (4.4) of \"A Fractal-Based Theoretical\n Framework for Retrieval of Surface Parameters from Electromagnetic\n Backscattering Data\"; Giorgioa Franceschetti et al.; IEEE TGRS 2000\n\n Parameters\n ----------\n theta : float numpy array\n incidence_angle\n k : float\n wavenumber\n H : float [0, 1]\n Hurst coefficient/exponent\n T : float\n topothesy\n\n \"\"\"\n def calc_S0(H, T):\n return H*T**(2*(1-H))*2**(2*H)*gamma(1+H)/gamma(1-H)\n\n S0 = calc_S0(H, T)\n # suppress warnings if theta is equal to zero and set the radar cross\n # section to 1\n zeros = theta == 0\n theta[zeros] = np.finfo(np.float32).eps\n rcs = np.cos(theta)**4*S0/(2*k*np.sin(theta))**(2+2*H)\n rcs[zeros] = 1\n return rcs\n","repo_name":"PFSWcas/insarsyn","sub_path":"backscatter.py","file_name":"backscatter.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
+{"seq_id":"17866509864","text":"\"\"\"\nCore Django views for VAR project\n\"\"\"\nimport json, os, logging\nfrom datetime import datetime\nfrom urllib.parse import urlencode, urlparse, parse_qs\nfrom django.http import HttpResponse, JsonResponse\nfrom django.contrib.auth import forms as userforms\nfrom django.urls import reverse\nfrom django.shortcuts import render_to_response, redirect\nfrom django.utils.cache import patch_response_headers\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom clientmanagement import userfunctions, loginform, modelgetters\nfrom api_app.model_files import apikeysmodel\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\ndef initRequest(request):\n \"\"\"\n A function to check and verify request\n :param request:\n :return:\n \"\"\"\n\n url = request.get_full_path()\n u = urlparse(url)\n query = parse_qs(u.query)\n query.pop('timestamp', None)\n try:\n u = u._replace(query=urlencode(query, True))\n except UnicodeEncodeError:\n data = {\n 'errormessage': 'Error appeared while encoding URL!'\n }\n return False, render_to_response(json.dumps(data), content_type='text/html')\n\n ## Set default page lifetime in the http header, for the use of the front end cache\n request.session['max_age_minutes'] = 10\n\n ## Create a dict in session for storing request params\n requestParams = {}\n request.session['requestParams'] = requestParams\n if getattr(settings, 'SESSION_COOKIE_AGE', None):\n request.session.set_expiry(settings.SESSION_COOKIE_AGE)\n\n if request.method == 'POST':\n for p in request.POST:\n pval = request.POST[p]\n pval = pval.replace('+', ' ')\n request.session['requestParams'][p.lower()] = pval\n else:\n for p in request.GET:\n pval = request.GET[p]\n pval = pval.replace('+', ' ')\n\n ## Here check if int or date type params can be placed\n\n request.session['requestParams'][p.lower()] = pval\n\n return True, None\n\n@login_required( login_url = 'login' )\ndef homepage(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {}\n data['PAGE_TITLE'] = 'CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n return render(request, 'index.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef usermanagement(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {}\n if (request.method == 'POST') and ('action' in request.POST):\n if (request.POST['action'] == 'deleteuser') and ('target' in request.POST):\n success, message = userfunctions.deleteUserID(request.POST['target'])\n data['success'] = success\n data['message'] = message\n data['username'] = request.POST['target']\n return JsonResponse(data)\n \n data['userlist'] = userfunctions.getUserList()\n data['PAGE_TITLE'] = 'Manage users: CMS infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n return render(request, 'user/usermanagement.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef userpersonalpage(request, deleted=0, deletedpage=0):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {}\n data['deleted'] = deleted != 0\n data['del_page'] = deletedpage != 0\n data['PAGE_TITLE'] = 'Personal page: CMS infotek'\n data['api_key'] = len(apikeysmodel.UserAPIKey.get_api_keys(request.user))>0\n data['built'] = 
datetime.now().strftime(\"%H:%M:%S\")\n return render(request, 'user/personal/main.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef deletepersonalapikey(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n result = apikeysmodel.UserAPIKey.delete_api_key(request.user)\n return redirect(reverse(\"personal_page_uri\", kwargs={\"deleted\": 1 if result else 0, \"deletedpage\":\"1\"}))\n\n\n@login_required( login_url = 'login' )\ndef createuser(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {}\n\n if (request.method == 'POST') and ('action' in request.POST):\n if (request.POST['action'] == 'validate') and ('target' in request.POST):\n if('value' in request.POST):\n if (request.POST['target']=='username'):\n success, message = userfunctions.checkUsernameExists(request.POST['value'])\n return JsonResponse({'success': success, 'message': message})\n elif(request.POST['target']=='email'):\n success, message = userfunctions.checkEmailExists(request.POST['value'])\n return JsonResponse({'success': success, 'message': message})\n elif(request.POST['target']=='password'):\n success, message = userfunctions.checkPasswordComplexity(request.POST['value'])\n return JsonResponse({'success': success, 'message': message})\n return JsonResponse({'success': False, 'message': 'Could not verify login'})\n else:\n return JsonResponse({'success': False, 'message': 'Could not verify login'})\n if (request.POST['action'] == 'createacc') and ('username' in request.POST) and ('password' in request.POST) and ('email' in request.POST) and \\\n ('firstname' in request.POST) and ('lastname' in request.POST):\n if (userfunctions.validateNewUser(request.POST['username'], request.POST['password'], request.POST['email'], request.POST['firstname'], request.POST['lastname'])):\n try:\n user = userfunctions.createUser(request.POST['username'], request.POST['password'], request.POST['email'], request.POST['firstname'], request.POST['lastname'])\n if user is not None:\n return redirect('/usermanagement')\n except Exception as exc:\n logger.error('!views.createuser!: Could not create user. 
\\n' + str(exc))\n data['username']=request.POST['username']\n data['password']=request.POST['password']\n data['email']=request.POST['email']\n data['firstname']=request.POST['firstname']\n data['lastname']=request.POST['lastname']\n data['creationfailed']=True\n \n data['PAGE_TITLE'] = 'Create user: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n return render(request, 'user/createuser.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef changeuser(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {}\n if (request.method == 'POST') and ('action' in request.POST):\n if (request.POST['action'] == 'validate') and ('target' in request.POST)and ('curusername' in request.POST):\n if('value' in request.POST):\n if (request.POST['target']=='username'):\n success, message = userfunctions.checkUsernameExists(request.POST['value'])\n return JsonResponse({'success': success, 'message': message})\n elif(request.POST['target']=='email'):\n success, message = userfunctions.checkEmailExists(request.POST['value'], request.POST['curusername'])\n return JsonResponse({'success': success, 'message': message})\n elif(request.POST['target']=='password'):\n success, message = userfunctions.checkPasswordComplexity(request.POST['value'])\n return JsonResponse({'success': success, 'message': message})\n return JsonResponse({'success': False, 'message': 'Could not verify login'})\n else:\n return JsonResponse({'success': False, 'message': 'Could not verify login'})\n elif (request.POST['action'] == 'changeemail') and ('username' in request.POST) and ('password' in request.POST) and ('email' in request.POST) and \\\n ('firstname' in request.POST) and ('lastname' in request.POST):\n if (userfunctions.validateNewUser(request.POST['username'], request.POST['password'], request.POST['email'], request.POST['firstname'], request.POST['lastname'])):\n try:\n user = userfunctions.createUser(request.POST['username'], request.POST['password'], request.POST['email'], request.POST['firstname'], request.POST['lastname'])\n if user is not None:\n return redirect('/usermanagement')\n except Exception as exc:\n logger.error('!views.createuser!: Could not create user. \\n' + str(exc))\n elif (request.POST['action'] == 'changeemail') and ('email' in request.POST):\n try:\n success, message = userfunctions.checkEmailExists(request.POST['email'], request.POST['curusername'])\n if success:\n if 'id' in request.POST:\n success = userfunctions.changeEmail( request.POST['email'], request.POST['id'])\n else:\n success = userfunctions.changeEmail(request.POST['email'], request.user.id)\n if user is not None:\n return redirect('/changeuser')\n except Exception as exc:\n logger.error('!views.createuser!: Could not create user. 
\\n' + str(exc))\n elif (request.POST['action'] == 'change') and ('id' in request.POST):\n cur_user = userfunctions.getUser(request.POST['id'])\n if cur_user is None:\n return redirect(reverse('usermanagement'))\n data['username']=cur_user.username\n data['email']=cur_user.email\n data['firstname']=cur_user.first_name\n data['lastname']=cur_user.last_name \n data['id']=cur_user.id\n\n if (request.method == 'GET'): \n data['username']=request.user.username\n data['email']=request.user.email\n data['firstname']=request.user.first_name\n data['lastname']=request.user.last_name\n \n data['PAGE_TITLE'] = 'change user: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n return render(request, 'user/changeuser.html', data, content_type='text/html')\n\n@login_required( login_url = 'login' )\ndef clientview(request, clientid):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = modelgetters.form_client_data(clientid)\n if data is None:\n return redirect('/') \n data['PAGE_TITLE'] = 'Client \"'+ data['clientname'] +'\": CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n data['needquillinput'] = True\n return render(request, 'views/client.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef allclientsview(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {'allclients': modelgetters.form_all_clients_data()}\n if data is None:\n return redirect('/') \n data['PAGE_TITLE'] = 'All clients: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n return render(request, 'views/allclients.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef allcomputersview(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {'allcomputers': modelgetters.form_all_computers_data()}\n if data is None:\n return redirect('/') \n data['PAGE_TITLE'] = 'All computers: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n return render(request, 'views/allcomputers.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef allpeopleview(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {'people': modelgetters.form_all_people_data()}\n if data is None:\n return redirect('/') \n data['PAGE_TITLE'] = 'All people: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n return render(request, 'views/allpeople.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef statisticsview(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = modelgetters.form_all_clients_statistics_data()\n if data is None:\n return redirect('/') \n data['PAGE_TITLE'] = 'Statistics: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n return render(request, 'views/statistics.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef allticketsview(request, reqtype):\n valid, response = initRequest(request)\n if not valid:\n return response\n if not reqtype in ['a', 'c']:\n reqtype='o'\n try:\n if reqtype == 'a':\n data = modelgetters.form_all_tickets_data()\n data['PAGE_TITLE'] = 'All Tickets: CMS Infotek'\n 
data['subtittle'] = 'All Tickets'\n data['closedticketson'] = True\n elif reqtype == 'c':\n data = modelgetters.form_closed_tickets_data()\n data['PAGE_TITLE'] = 'Closed Tickets: CMS Infotek'\n data['subtittle'] = 'Closed Tickets'\n data['closedticketson'] = True\n else:\n data = modelgetters.form_open_tickets_data()\n data['PAGE_TITLE'] = 'Open Tickets: CMS Infotek'\n data['subtittle'] = 'Open Tickets'\n data['closedticketson'] = False\n except Exception:\n return redirect('/') \n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n data['needquillinput'] = True\n return render(request, 'views/alltickets.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef systemupdatesview(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = modelgetters.form_updates_data() \n data['PAGE_TITLE'] = 'System Updates: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n data['needquillinput'] = True\n return render(request, 'views/updates.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef allWikiArticlesView(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = modelgetters.form_all_wiki_data() \n data['PAGE_TITLE'] = 'Wiki: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n data['needquillinput'] = False\n return render(request, 'views/allwiki.html', data, content_type='text/html')\n\n\n@login_required( login_url = 'login' )\ndef wikiArticleView(request, wikiuuid):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = modelgetters.form_one_wiki_data(wikiuuid) \n data['PAGE_TITLE'] = 'Wiki: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n data['needquillinput'] = True\n return render(request, 'views/wikiview.html', data, content_type='text/html')\n\n\n#@login_required( login_url = 'login' )\ndef allToolsView(request, tool_type):\n valid, response = initRequest(request)\n if not valid:\n return response\n if not tool_type in ['l', 'f']:\n tool_type=''\n try:\n if tool_type == 'l':\n data = modelgetters.form_all_link_tools_data(request.user)\n data['PAGE_TITLE'] = 'Link tools: CMS Infotek'\n data['subtittle'] = 'Link tools'\n elif tool_type == 'f':\n data = modelgetters.form_all_file_tools_data(request.user)\n data['PAGE_TITLE'] = 'File tools: CMS Infotek'\n data['subtittle'] = 'File tools'\n else:\n data = modelgetters.form_all_tools_data(request.user)\n data['PAGE_TITLE'] = 'All tools: CMS Infotek'\n data['subtittle'] = 'All tools'\n except Exception as exc:\n print(exc)\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = True\n return render(request, 'views/alltools.html', data, content_type='text/html')\n\n\ndef ticketdoneview(request):\n valid, response = initRequest(request)\n if not valid:\n return response\n data = {} \n data['PAGE_TITLE'] = 'Ticket submitted: CMS Infotek'\n data['built'] = datetime.now().strftime(\"%H:%M:%S\")\n data['needdatatables'] = False\n return render(request, 'forms/thankyouticket.html', data, content_type='text/html')","repo_name":"isstek/clientmanagement","sub_path":"clientmanagement/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
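# Editor's sketch (assumption, not from the repo): one plausible URLconf wiring for
# the views above, inferred from the view signatures and the reverse()/redirect()
# names used in this file. Route paths and names are illustrative only.
#
# from django.urls import path
# from clientmanagement import views
#
# urlpatterns = [
#     path('createuser/', views.createuser, name='createuser'),
#     path('changeuser/', views.changeuser, name='changeuser'),
#     path('client/<int:clientid>/', views.clientview, name='clientview'),
#     path('tickets/<str:reqtype>/', views.allticketsview, name='alltickets'),
#     path('tools/<str:tool_type>/', views.allToolsView, name='alltools'),
# ]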
+{"seq_id":"2149442423","text":"import streamlit as st\nfrom langchain import PromptTemplate\nfrom langchain.llms import OpenAI\n\ntemplate = \"\"\"\n Below is an email that may be poorly worded.\n Your goal is to:\n - Properly format the email \n - Convert the input text to a specified language\n\n Here are some examples of words in different languages:\n - English: French Fries, cotton candy, apartment, garbage, cookie, green thumb, parking lot, pants, windshield\n - Hindi: हम सप्ताहांत के लिए बार्सिलोना गए। हमें आपको बताने के लिए बहुत सी बातें हैं।\n - Punjabi: ਅਸੀਂ ਵੀਕਐਂਡ ਲਈ ਬਾਰਸੀਲੋਨਾ ਗਏ ਸੀ। ਸਾਡੇ ਕੋਲ ਤੁਹਾਨੂੰ ਦੱਸਣ ਲਈ ਬਹੁਤ ਸਾਰੀਆਂ ਚੀਜ਼ਾਂ ਹਨ।\n \n Below is the email, and language:\n LANGUAGE: {language}\n EMAIL: {email}\n \n YOUR {language} RESPONSE:\n\"\"\"\n\nprompt = PromptTemplate(\n input_variables=[\"language\", \"email\"],\n template=template,\n)\n\ndef load_LLM(openai_api_key):\n \"\"\"Logic for loading the chain you want to use should go here.\"\"\"\n # Make sure your openai_api_key is set as an environment variable\n llm = OpenAI(temperature=.7, openai_api_key=openai_api_key)\n return llm\n\nst.set_page_config(page_title=\"Globalize Email\", page_icon=\":robot:\")\nst.header(\"Globalize Text\")\n\ncol1, col2 = st.columns(2)\n\nwith col1:\n st.markdown(\"Often professionals would like to improve their emails, but don't have the skills to do so. \\n\\n This tool \\\n will help you improve your email skills by converting your emails into a more professional format.\")\n\nwith col2:\n st.image(image='test.png', width=500, caption='Powered by langchain, openai')\n\nst.markdown(\"## Enter Your Email To Convert\")\n\ndef get_api_key():\n input_text = st.text_input(label=\"OpenAI API Key \", placeholder=\"Ex: sk-2twmA8tfCb8un4...\", key=\"openai_api_key_input\")\n return input_text\n\nopenai_api_key = get_api_key()\n\n\noption_language = st.selectbox('Which language would you like to convert?',\n ('English', 'Hindi', 'Punjabi'))\n\ndef get_text():\n input_text = st.text_area(label=\"Email Input\", label_visibility='collapsed', placeholder=\"Your Email...\", key=\"email_input\")\n return input_text\n\nemail_input = get_text()\n\nif len(email_input.split(\" \")) > 700:\n st.write(\"Please enter a shorter email. The maximum length is 700 words.\")\n st.stop()\n\ndef update_text_with_example():\n print (\"in updated\")\n st.session_state.email_input = \"Sally I am starts work at yours monday from dave\"\n\nst.button(\"*See An Example*\", type='secondary', help=\"Click to see an example of the email you will be converting.\", on_click=update_text_with_example)\n\nst.markdown(\"### Your Converted Email:\")\n\nif email_input:\n if not openai_api_key:\n st.warning('Please insert OpenAI API Key. Instructions [here](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key)', icon=\"⚠️\")\n st.stop()\n\n llm = load_LLM(openai_api_key=openai_api_key)\n\n prompt_with_email = prompt.format(language=option_language, email=email_input)\n\n formatted_email = llm(prompt_with_email)\n\n st.write(formatted_email)","repo_name":"rohitdhamija/formatEmailText-streamlit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"13853793831","text":"import numpy as n\npi = n.pi\n\ndef Iterate(P, F, to, R, ex, eq, E, v):\n '''\n Iterative scheme to calculate:\n - True thickness\n - True stresses\n - Log plastic strains\n Requires:\n - Pressure P\n - Force F\n - *Initial* thickness, to\n - Current radius, R\n - Axial log stn, ex\n - Hoop log stn, eq\n - Modulus\n - Poisson's ratio\n Returns:\n - t_tru, tau_x, tau_q, ep_x, ep_q, ep_3, e3\n \n '''\n # This first iteration was pulled out of while so that\n # there's no need for a if k==0 check in the loop\n # Initialize to erroneously high value\n t_tru = 10000 \n #Initial approximation of thickness\n ta = to*n.exp(-ex-eq)\n # Approximate true stress based on ta\n tau_x = P*R/(2*ta) + F/(2*pi*R*ta)\n tau_q = P*R/ta\n # e_plastic strain is e_tot minus e_elastic\n ep_x = ex - (tau_x - v*tau_q)/E \n ep_q = eq - (tau_q - v*tau_x)/E \n # Assume plastic incompressibility\n ep_3 = -(ep_x + ep_q) \n # Then add on the elastic part of e3 (plane stress) to get e3_total\n e3 = ep_3 - (v/E)*(tau_x + tau_q) \n t_tru = to*n.exp(e3) #Get new thickness\n \n itcount = 1\n itmax = 200\n #print('ta: ', ta)\n #print('t_tru: ', t_tru)\n while (min(ta/t_tru, t_tru/ta)<.999) and (itcount<=itmax):\n ta = t_tru\n # Approximate true stress based on ta\n tau_x = P*R/(2*ta) + F/(2*pi*R*ta) \n tau_q = P*R/ta\n # e_plastic strain is e_tot minus e_elastic\n ep_x = ex - (tau_x - v*tau_q)/E \n ep_q = eq - (tau_q - v*tau_x)/E \n # Assume plastic incompressibility\n ep_3 = -(ep_x + ep_q) \n # Then add on the elastic part of e3 (plane stress) to get e3_total\n e3 = ep_3 - (v/E)*(tau_x + tau_q) \n t_tru = to*n.exp(e3) #Get new thickness\n itcount+=1\n\n if (min(ta/t_tru, t_tru/ta)<.999):\n print('Max iteration reached')\n \n return t_tru, tau_x, tau_q, ep_x, ep_q, ep_3, e3\n\n","repo_name":"saintsfan342000/GM-Calibration","sub_path":"TrueStsStn.py","file_name":"TrueStsStn.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"17968261726","text":"print(\"1==grocery,2==clothes,3==icecream,4==e appliances\",sep=\"\\n\")\r\npay1=0\r\npay2=0\r\npay3=0\r\npay4=0\r\na=int(input(\"enter product nuumber\")) \r\nprint(\"Megha welcomes you to shopping\")\r\nif(a==1):\r\n #grocery\r\n n=int(input(\"enter the price of grocery to calculate discount:\"))\r\n if(n==100):\r\n disc=n*0\r\n elif(n==150):\r\n disc=n*0.02\r\n else:\r\n disc=n*0.05\r\n pay1=n-disc\r\n print(\"your grocery total is\",pay1)\r\nelif(a==2):\r\n #clothes\r\n kurti=300\r\n short_tops=500\r\n num_kurti=int(input(\"enter no of kurti purchased\"))\r\n num_short_tops=int(input(\"enter no of short tops purchased\"))\r\n pay2=kurti*num_kurti+short_tops*num_short_tops\r\n print(\"your total price for clothes is\",pay2)\r\nelif(a==3):\r\n #icecream\r\n butterscotch=50\r\n chocolate=30\r\n num_butterscotch=int(input(\"enter no of butterscotch purchased\"))\r\n num_chocolate=int(input(\"enter no of chocolate purchased\"))\r\n pay3=butterscotch*num_butterscotch+chocolate*num_chocolate\r\n print(\"your icecream purchase is\",pay3)\r\nelse:\r\n #e appliances\r\n n=int(input(\"enter the price of your e appliances to calculate discount:\"))\r\n if(n<=50000):\r\n disc=n*0.2\r\n elif(n<=75000):\r\n disc=n*0.5\r\n else:\r\n disc=n*0.7\r\n pay4=n-disc\r\n print(\"your e appliances total is\",pay4)\r\ntotal_purchase=pay1+pay2+pay3+pay4\r\nprint(total_purchase)\r\nif(total_purchase>30000):\r\n a=total_purchase*0.02\r\n gst=total_purchase+a\r\n print(\"your total purchase: \",gst)\r\nelse:\r\n a=total_purchase*0.03\r\n gst=total_purchase+a\r\n print(\"your total purchase: \",gst)\r\nprint(\"thank you for shopping and visit again\")\r\n","repo_name":"Megha2004/pythonBasics","sub_path":"shopping bill.py","file_name":"shopping bill.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"28777432414","text":"from ooo.oenv.env_const import UNO_NONE\nimport typing\n\n\nclass Boundary(object):\n \"\"\"\n Struct Class\n\n contains start and end position of a word.\n \n It is used in word break iterator and text conversion.\n\n See Also:\n `API Boundary `_\n \"\"\"\n __ooo_ns__: str = 'com.sun.star.i18n'\n __ooo_full_ns__: str = 'com.sun.star.i18n.Boundary'\n __ooo_type_name__: str = 'struct'\n typeName: str = 'com.sun.star.i18n.Boundary'\n \"\"\"Literal Constant ``com.sun.star.i18n.Boundary``\"\"\"\n\n def __init__(self, startPos: typing.Optional[int] = 0, endPos: typing.Optional[int] = 0) -> None:\n \"\"\"\n Constructor\n\n Arguments:\n startPos (int, optional): startPos value.\n endPos (int, optional): endPos value.\n \"\"\"\n super().__init__()\n\n if isinstance(startPos, Boundary):\n oth: Boundary = startPos\n self.startPos = oth.startPos\n self.endPos = oth.endPos\n return\n\n kargs = {\n \"startPos\": startPos,\n \"endPos\": endPos,\n }\n self._init(**kargs)\n\n def _init(self, **kwargs) -> None:\n self._start_pos = kwargs[\"startPos\"]\n self._end_pos = kwargs[\"endPos\"]\n\n\n @property\n def startPos(self) -> int:\n \"\"\"\n Start position of a word, inclusive\n \"\"\"\n return self._start_pos\n\n @startPos.setter\n def startPos(self, value: int) -> None:\n self._start_pos = value\n\n @property\n def endPos(self) -> int:\n \"\"\"\n End position of a word, exclusive\n \"\"\"\n return self._end_pos\n\n @endPos.setter\n def endPos(self, value: int) -> None:\n self._end_pos = value\n\n\n__all__ = ['Boundary']\n","repo_name":"Amourspirit/python-ooouno","sub_path":"ooo/lo/i18n/boundary.py","file_name":"boundary.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"5489634361","text":"import psycopg2\nimport requests\nimport json\nimport os\nfrom urllib.parse import urlparse\nimport functools\n\ndef connect():\n try:\n # Get the database URL from environment variables\n db_url = os.environ.get('DATABASE_URL')\n\n if db_url is None:\n # If DATABASE_URL is not set, construct the URL using individual parameters\n db_url = f\"postgresql://{os.environ.get('DATABASE_USER')}:{os.environ.get('DATABASE_PASSWORD')}@{os.environ.get('DATABASE_HOST')}:{os.environ.get('DATABASE_PORT')}/{os.environ.get('DATABASE_DATABASE')}\"\n\n # Parse the URL to extract connection parameters\n url_parts = urlparse(db_url)\n\n connection = psycopg2.connect(\n host=url_parts.hostname,\n database=url_parts.path[1:],\n user=url_parts.username,\n password=url_parts.password,\n port=url_parts.port\n )\n\n # Create a cursor object to interact with the database\n return connection\n\n except psycopg2.Error as e:\n print(\"Error connecting to the database:\", e)\n\ndef create_user(username, name, email, password_hash, phone):\n connection = connect()\n cursor = connection.cursor()\n\n # Use a SQL query to insert a new user into the \"users\" table\n insert_query = \"\"\"\n INSERT INTO users (username, name, email, password_hash, phone)\n VALUES (%s, %s, %s, %s, %s)\n RETURNING id;\n \"\"\"\n\n try:\n cursor.execute(insert_query, (username, name, email, password_hash, phone))\n user_id = cursor.fetchone()[0] # Get the ID of the newly created user\n connection.commit()\n return user_id\n except psycopg2.Error as e:\n print(\"Error creating a new user:\", e)\n return None\n finally:\n cursor.close()\n connection.close()\n\n\ndef get_user_by_email(email):\n select_query = \"SELECT id, username, email, role FROM users WHERE email = %s\"\n user = None\n\n try:\n with psycopg2.connect(os.environ.get('DATABASE_URL')) as connection:\n with connection.cursor() as cursor:\n cursor.execute(select_query, (email,))\n user = cursor.fetchone()\n except psycopg2.Error as e:\n print(\"Error retrieving user by username:\", e)\n\n return user\n\n\ndef get_user_by_id(user_id):\n select_query = \"SELECT id, username, name, email, phone, role FROM users WHERE id = %s\"\n user = None\n\n try:\n with psycopg2.connect(os.environ.get('DATABASE_URL')) as connection:\n with connection.cursor() as cursor:\n cursor.execute(select_query, (user_id,))\n user = cursor.fetchone()\n except psycopg2.Error as e:\n print(\"Error retrieving user by ID:\", e)\n\n return user # Returns the user's information if found, or None if not found\n\n\ndef get_user_by_username(username):\n select_query = \"SELECT id, username, name, email, password_hash, phone, role FROM users WHERE username = %s\"\n user = None\n\n try:\n with psycopg2.connect(os.environ.get('DATABASE_URL')) as connection:\n with connection.cursor() as cursor:\n cursor.execute(select_query, (username,))\n user = cursor.fetchone()\n except psycopg2.Error as e:\n print(\"Error retrieving user by username:\", e)\n\n return user\n\n\ndef get_clients():\n connection = connect()\n cursor = connection.cursor()\n select_query = \"SELECT id, username, name, email, phone, role FROM users ORDER BY id ASC\"\n cursor.execute(select_query)\n records = []\n for row in cursor.fetchall():\n record = {\n 'id': row[0],\n 'username': row[1],\n 'name': row[2],\n 'email': row[3],\n 'phone': row[4],\n 'role': row[5]\n\n }\n records.append(record)\n\n cursor.close()\n connection.close()\n return records\n\n\ndef get_client_cases(client_email):\n connection = connect()\n cursor = 
connection.cursor()\n select_query = \"SELECT case_id, case_status, case_work_progress, case_quote, case_notes\" \\\n \" FROM cases WHERE cases.client_email = '\" + client_email + \"' ORDER BY case_id ASC;\"\n cursor.execute(select_query)\n records = cursor.fetchall()\n\n select_client_name = \"SELECT name FROM users WHERE users.email = '\" + client_email + \"';\"\n cursor.execute(select_client_name)\n client_name = cursor.fetchone()\n\n records.insert(0, client_name)\n\n cursor.close()\n connection.close()\n return records\n\n\ndef get_all_cases():\n conn = connect()\n cursor = conn.cursor()\n\n # Execute the SQL query to retrieve case information with client email\n query = \"\"\"\n SELECT case_id, client_email, case_status, case_work_progress, case_quote, case_notes FROM cases;\n \"\"\"\n cursor.execute(query)\n cases = cursor.fetchall()\n\n cursor.close()\n conn.close()\n return cases\n\n\ndef get_case_details(case_id):\n conn = connect()\n cursor = conn.cursor()\n\n query = \"\"\" SELECT * FROM cases WHERE case_id = %s; \"\"\"\n cursor.execute(query, (case_id,))\n case = cursor.fetchall()\n\n cursor.close()\n conn.close()\n return case\n\n\ndef get_case_columns():\n conn = connect()\n cursor = conn.cursor()\n\n query = \"\"\"\n SELECT column_name\n FROM information_schema.columns\n WHERE table_name = 'cases'\n ORDER BY ordinal_position ASC;\n \"\"\"\n cursor.execute(query)\n columns = [column[0] for column in cursor.fetchall()]\n\n cursor.close()\n conn.close()\n return columns\n\n\ndef get_client_details(client_id):\n conn = connect()\n cursor = conn.cursor()\n\n query = \"\"\" SELECT id, username, name, email, phone, role FROM users WHERE id = %s; \"\"\"\n\n cursor.execute(query, (client_id,))\n client_details = cursor.fetchall()\n cursor.close()\n conn.close()\n return client_details\n\n\ndef update_user_client(**kwargs):\n user_details = get_user_by_email(kwargs['email'])\n user_id = user_details[0]\n update_user(user_id, **kwargs)\n\n\ndef update_user(user_id, **kwargs):\n conn = connect()\n cursor = conn.cursor()\n\n update_query = f\"\"\"\n UPDATE users\n SET\n username = '{kwargs['username']}',\n password_hash = '{kwargs['password_hash']}',\n email = '{kwargs['email']}',\n phone = '{kwargs['phone']}',\n name = '{kwargs['name']}',\n role = {kwargs['role']}\n WHERE id = {user_id};\n \"\"\"\n\n cursor.execute(update_query)\n conn.commit()\n cursor.close()\n conn.close()\n\ndef fetch_monday():\n with open(\"jm/monday_api.txt\", \"r\") as f:\n api_key = f.readline().strip()\n\n with open(\"jm/monday_board_id.txt\", \"r\") as f:\n board_id = f.readline().strip()\n\n apiUrl = \"https://api.monday.com/v2\"\n headers = {\"Authorization\" : api_key}\n\n query = \"{boards(ids:\" + board_id + \") { name id description items { name column_values{title id type text } } } }\"\n data = {\"query\" : query}\n\n r = requests.post(url=apiUrl, json=data, headers=headers)\n monday_data = []\n \n if r.status_code == 200:\n response = r.json()\n for i in range(0, len(response[\"data\"][\"boards\"][0][\"items\"])):\n data = response[\"data\"][\"boards\"][0][\"items\"][i]\n new_dict = {'name': data['name']}\n new_dict.update({item['id']: item['text'] for item in data['column_values']})\n temp = format_dict(**new_dict)\n monday_data.append(temp) \n return monday_data\n else:\n print(f\"Error: {r.status_code} - {r.text}\")\n\ndef format_dict(**dict):\n perms = None\n\n if dict['permission_to_open'] == 'yes':\n perms = True\n \n if dict['email'] == '':\n placeholder = 'test@test.test'\n else:\n 
placeholder = dict['email']\n\n case = (\n placeholder,\n dict['drop_off'],\n dict['case_status'],\n dict['work_progress'],\n dict['malfunction'],\n dict['quote'],\n dict['type_of_device'],\n dict['important_folders'],\n dict['size'],\n perms,\n dict['date_received'],\n dict['date_quote_approved'],\n dict['date_completed'],\n dict['date_finalized'],\n dict['referred_by'],\n dict['notes']\n )\n\n # Replace empty strings with None\n case = tuple(None if value == '' else value for value in case)\n\n return case\n\n\ndef clear_cases():\n conn = connect()\n cursor = conn.cursor()\n\n delete_query = f\"\"\"\n DELETE FROM cases;\n \"\"\"\n\n cursor.execute(delete_query)\n conn.commit()\n cursor.close()\n conn.close()\n\ndef create_from_monday():\n #list of monday data\n monday_data = fetch_monday()\n clear_cases()\n conn = connect()\n cursor = conn.cursor()\n insert_query = \"\"\"\n INSERT INTO cases (client_email, case_drop_off, case_status, case_work_progress,\n case_malfunction, case_quote, case_device_type, case_important_folders,\n case_size, case_permissions, case_date_recieved, case_date_quote_approved,\n case_completed_date, case_date_finalized, case_referred_by, case_notes)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n cursor.executemany(insert_query, monday_data)\n conn.commit()\n cursor.close()\n conn.close()\n\ndef create_case(cases_data):\n conn = connect()\n cursor = conn.cursor()\n\n # Execute the SQL query to insert a new case\n insert_cases_query = \"\"\"\n INSERT INTO cases (client_email, case_drop_off, case_status, case_work_progress,\n case_malfunction, case_quote, case_device_type, case_important_folders,\n case_size, case_permissions, case_date_recieved, case_date_quote_approved,\n case_completed_date, case_date_finalized, case_referred_by, case_notes)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n\n cursor.execute(insert_cases_query, cases_data)\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef update_case(**kwargs):\n conn = connect()\n cursor = conn.cursor()\n\n update_query = f\"\"\"\n UPDATE cases \n SET \n client_email = '{kwargs['client_email']}',\n case_drop_off = '{kwargs['case_drop_off']}',\n case_status = '{kwargs['case_status']}',\n case_work_progress = '{kwargs['case_work_progress']}',\n case_malfunction = '{kwargs['case_malfunction']}',\n case_quote = {kwargs['case_quote']},\n case_device_type = '{kwargs['case_device_type']}',\n case_important_folders = '{kwargs['case_important_folders']}',\n case_size = '{kwargs['case_size']}',\n case_permissions = {kwargs['case_permissions']},\n case_date_recieved = '{kwargs['case_date_recieved']}',\n case_date_quote_approved = '{kwargs['case_date_quote_approved']}',\n case_completed_date = '{kwargs['case_completed_date']}',\n case_date_finalized = '{kwargs['case_date_finalized']}',\n case_referred_by = '{kwargs['case_referred_by']}',\n case_notes = '{kwargs['case_notes']}'\n WHERE case_id = {kwargs['case_id']};\n \"\"\"\n\n cursor.execute(update_query)\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef delete_case(case_id):\n conn = connect()\n cursor = conn.cursor()\n\n delete_query = f\"\"\"\n DELETE FROM cases\n WHERE case_id = {case_id};\n \"\"\"\n\n cursor.execute(delete_query)\n conn.commit()\n cursor.close()\n 
conn.close()\n","repo_name":"Tealiosv2/TeraDrive","sub_path":"backend/database_operations.py","file_name":"database_operations.py","file_ext":"py","file_size_in_byte":11571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
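# Editor's sketch (not part of the repo): exercising the helpers above. Assumes
# DATABASE_URL (or the individual DATABASE_* variables) points at a reachable
# Postgres instance with the users/cases tables; all values are made up.
if __name__ == "__main__":
    new_id = create_user("jdoe", "Jane Doe", "jane@example.com", "<password-hash>", "555-0100")
    print("created user id:", new_id)
    print(get_user_by_username("jdoe"))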
+{"seq_id":"31311237567","text":"import openai\n\n# Load your API key from an environment variable or secret management service\n# open ai API key\nopenai.api_key = \"YOUR-API-KEY\"\n\n# Define the system message\nsystem_msg = \"You are an assistant that is responsible for taking the title and description of posts from Facebook marketplace and identifying what the item or items are that the user is sellings. Reply only with the items.\"\n\ndef ask_gpt(user_msg):\n # GPT response\n response = openai.ChatCompletion.create(model= \"gpt-3.5-turbo\",\n messages=[{\"role\": \"system\", \"content\": system_msg},\n {\"role\": \"user\", \"content\": user_msg}])\n content = response[\"choices\"][0][\"message\"][\"content\"]\n code = response[\"choices\"][0][\"finish_reason\"]\n\n return content","repo_name":"owenlheron/FB-Marketplace-Scanner","sub_path":"OpenAI_Lookup.py","file_name":"OpenAI_Lookup.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"8165907290","text":"import schedule\r\nimport time\r\nfrom datetime import datetime\r\nimport threading\r\n\r\ndef job():\r\n now = datetime.now()\r\n\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n print(current_time, \"Am arbeiten\")\r\n\r\ndef allefuenf():\r\n for i in range(4):\r\n now = datetime.now()\r\n\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n print(current_time,\"hallo\",i)\r\n time.sleep(1)\r\n\r\ndef run_threaded(funktion):\r\n thread = threading.Thread(target=funktion)\r\n thread.start()\r\n\r\n\r\nschedule.every(5).seconds.do(run_threaded, allefuenf)\r\n\r\nwhile True:\r\n schedule.run_pending()\r\n time.sleep(1)\r\n","repo_name":"anthonyeckl/Domainscrapeing","sub_path":"scheduletest.py","file_name":"scheduletest.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"7357131885","text":"import calendar\nimport datetime\n\nfrom flask import Blueprint, request\nfrom sqlalchemy import and_, text\n\nfrom patient.model import Patient\nfrom redisInit import rs\nfrom user.model import User\nfrom .model import Reserve, db\n\nreserve_blueprint = Blueprint('reserve', __name__)\n\n\ndef getUid():\n token = request.headers.get('Authorization')\n if token is None:\n return None\n uid = rs.get(token)\n print(uid)\n if uid is None:\n return None\n return uid.decode()\n\n\n@reserve_blueprint.before_request\ndef before_request():\n print('before_request')\n if getUid() is None:\n return {\n 'code': 0,\n 'message': '请登录'\n }, 401\n\n\n@reserve_blueprint.route('/reserve/command/list/time/query', methods=['POST'])\ndef getReservationByTime():\n start_time = request.json.get('startAt')\n end_time = request.json.get('endAt')\n # 禁止同时输入startAt endAt 和 year month\n # if start_time is not None and end_time is not None and query_year is None and query_month is not None:\n # return {\n # 'code': 0,\n # 'message': '禁止同时输入startAt endAt 和 year month',\n # 'data': []\n # }\n # 搜索\n day_list = Reserve.query.filter(\n and_(Reserve.reserve_time >= start_time, Reserve.reserve_time <= end_time)\n if start_time is not None and end_time is not None else text(''),\n )\n day_list = day_list.join(User, User.id == Reserve.doctor_id).join(Patient,\n Reserve.patient_id == Patient.id)\n day_list = day_list.add_entity(User).add_entity(Patient).all()\n print(\"查询结果\", day_list)\n\n if day_list is None:\n return {\n 'code': 0,\n 'data': [],\n 'message': '查询失败'\n }\n status_list = [\"success\", \"default\", \"error\"]\n return_result = []\n for item in day_list:\n return_result.append({\n 'id': item[0].id,\n 'type': status_list[item[0].rank],\n 'rank': item[0].rank,\n 'title': item[0].title,\n 'time': datetime.datetime.strptime(str(item[0].reserve_time), \"%Y-%m-%d %H:%M:%S\").strftime(\n \"%Y-%m-%d %H:%M:%S\"),\n 'doctor_Id': item[0].doctor_id,\n 'doctorName': item[1].realName,\n 'patientId': item[0].patient_id,\n 'patientName': item[2].name,\n 'description': item[0].description,\n })\n return {\n 'code': 1,\n 'data': return_result,\n 'message': '查询成功'\n }\n\n\n@reserve_blueprint.route('/reserve/command/list/date/query', methods=['POST'])\ndef getReservationByDate():\n query_year = request.json.get('year')\n query_month = request.json.get('month')\n query_start = None\n query_end = None\n if query_year is not None and query_month is not None:\n # 获得搜索日期的第一天和最后一天\n week_day, month_day_count = calendar.monthrange(query_year, query_month)\n query_start = datetime.datetime(query_year, query_month, 1, 0, 0, 0)\n query_end = datetime.datetime(query_year, query_month, day=month_day_count, hour=23, minute=59, second=59)\n\n day_list = Reserve.query.filter(\n and_(Reserve.reserve_time >= query_start, Reserve.reserve_time <= query_end)\n if query_start is not None and query_end is not None else text(''),\n )\n day_list = day_list.join(User, User.id == Reserve.doctor_id).join(Patient,\n Reserve.patient_id == Patient.id)\n day_list = day_list.add_entity(User).add_entity(Patient).all()\n print(\"查询结果\", day_list)\n\n if day_list is None:\n return {\n 'code': 0,\n 'data': [],\n 'message': '查询失败'\n }\n status_list = [\"success\", \"default\", \"error\"]\n return_result = []\n for item in day_list:\n return_result.append({\n 'id': item[0].id,\n 'type': status_list[item[0].rank],\n 'rank': item[0].rank,\n 'title': item[0].title,\n 'time': datetime.datetime.strptime(str(item[0].reserve_time), \"%Y-%m-%d 
%H:%M:%S\").strftime(\n \"%Y-%m-%d %H:%M:%S\"),\n 'doctor_Id': item[0].doctor_id,\n 'doctorName': item[1].realName,\n 'patientId': item[0].patient_id,\n 'patientName': item[2].name,\n 'description': item[0].description,\n })\n return {\n 'code': 1,\n 'data': return_result,\n 'message': '查询成功'\n }\n\n\n@reserve_blueprint.route('/reserve/command/add', methods=['POST'])\ndef addNewReservation():\n # 获取前端传来的数据\n doctor_id = request.json.get('doctorId')\n patient_id = request.json.get('patientId')\n reserve_time = request.json.get('time')\n title = request.json.get('title')\n rank = request.json.get('rank')\n description = request.json.get('description')\n # 检查数据\n if doctor_id is None or patient_id is None \\\n or reserve_time is None or title is None or rank is None or description is None:\n return {\n 'code': 0,\n 'message': '参数错误'\n }\n # 检查医生是否存在\n doctor = User.query.filter_by(id=doctor_id).first()\n if doctor is None:\n return {\n 'code': 0,\n 'message': '医生不存在'\n }\n # 检查患者是否存在\n patient = Patient.query.filter_by(id=patient_id).first()\n if patient is None:\n return {\n 'code': 0,\n 'message': '患者不存在'\n }\n now_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n format_pattern = '%Y-%m-%d %H:%M:%S'\n # 检查时间是否合法\n if (datetime.datetime.strptime(str(reserve_time), format_pattern) - datetime.datetime.strptime(now_time,\n format_pattern)).days < 0:\n return {\n 'code': 0,\n 'message': '时间不合法'\n }\n\n new_reserve = Reserve(\n doctor_id=doctor_id,\n patient_id=patient_id,\n reserve_time=reserve_time,\n title=title,\n description=description,\n rank=rank,\n operator_id=doctor_id,\n )\n db.session.add(new_reserve)\n try:\n db.session.commit()\n return {\n 'code': 1,\n 'message': '预约成功'\n }\n except Exception as e:\n print(e)\n return {\n 'code': 0,\n 'message': '预约失败',\n 'error': str(e)\n }\n\n\n@reserve_blueprint.route('/reserve/command/edit', methods=['POST'])\ndef editReservation():\n # 获取前端传来的数据\n reservation_id = request.json.get('reservation_id')\n doctor_id = request.json.get('doctorId')\n patient_id = request.json.get('patientId')\n reserve_time = request.json.get('time')\n title = request.json.get('title')\n rank = request.json.get('rank')\n description = request.json.get('description')\n query_reservation = Reserve.query.filter(Reserve.id == reservation_id).first()\n if query_reservation is None:\n return {\n 'code': 0,\n 'message': '该记录不存在'\n }\n\n # 检查医生是否存在\n doctor = User.query.filter_by(id=doctor_id).first()\n if doctor is None:\n return {\n 'code': 0,\n 'message': '医生不存在'\n }\n # 检查患者是否存在\n patient = Patient.query.filter_by(id=patient_id).first()\n if patient is None:\n return {\n 'code': 0,\n 'message': '患者不存在'\n }\n now_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n format_pattern = '%Y-%m-%d %H:%M:%S'\n # 检查时间是否合法\n if (datetime.datetime.strptime(str(reserve_time), format_pattern) - datetime.datetime.strptime(now_time,\n format_pattern)).days < 0:\n return {\n 'code': 0,\n 'message': '时间不合法'\n }\n\n query_reservation.doctor_id = doctor_id,\n query_reservation.patient_id = patient_id,\n query_reservation.reserve_time = reserve_time,\n query_reservation.title = title,\n query_reservation.description = description,\n query_reservation.rank = rank,\n query_reservation.operator_id = doctor_id,\n db.session.add(query_reservation)\n try:\n db.session.commit()\n return {\n 'code': 1,\n 'message': '修改预约成功'\n }\n except Exception as e:\n print(e)\n return {\n 'code': 0,\n 'message': '修改预约失败',\n 'error': str(e)\n 
}\n","repo_name":"RogisterDu/dcptbackend","sub_path":"reservation/reserve_blueprint.py","file_name":"reserve_blueprint.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"4427966675","text":"import os\nimport sys\nimport json\nimport importlib\n\nfrom . import globals\nfrom . import ts4\nfrom .util import *\nfrom .address import Cell\n\n###########################################################################\n## Public functions\n\ndef enable_fees(value):\n \"\"\"Enables gas consumtion accounting in the balance.\n\n :param bool value: `True` to enable gas accounting\n \"\"\"\n assert isinstance(value, bool)\n cfg = ts4.core.get_global_config()\n cfg.gas_fee = value\n ts4.core.set_global_config(cfg)\n\ndef set_balance(target, value):\n \"\"\"Sets balance for a given account.\n\n :param Address target: Target address\n :param num value: Desired balance\n \"\"\"\n ts4.ensure_address(target)\n ts4.core.set_balance(target.str(), int(value))\n\ndef set_trace_level(level):\n \"\"\"Sets the trace level for `core`.\n\n :param num value: desired trace level. Set `0` to disable trace logging\n \"\"\"\n cfg = ts4.core.get_global_config()\n cfg.trace_level = level\n ts4.core.set_global_config(cfg)\n\ndef set_trace_tvm(value):\n \"\"\"Enables TVM tracing.\n\n :param bool value: `True` to enable TVM tracing\n \"\"\"\n cfg = ts4.core.get_global_config()\n cfg.trace_tvm = value\n ts4.core.set_global_config(cfg)\n\ndef set_global_gas_limit(value):\n \"\"\"Sets global gas limit.\n\n :param num value: Desired global gas limit\n \"\"\"\n cfg = ts4.core.get_global_config()\n cfg.global_gas_limit = value\n ts4.core.set_global_config(cfg)\n\ndef get_cell_repr_hash(cell):\n \"\"\"Calculates hash of a given `Cell`.\n\n :param Cell cell: Cell to be hashed\n :return: Hexadecimal representation of the hash of the given cell\n :rtype: str\n \"\"\"\n assert isinstance(cell, Cell)\n return '0x' + ts4.core.get_cell_repr_hash(cell.raw_)\n\n\n###########################################################################\n## Internal functions\n\nclass ExecutionResult:\n def __init__(self, result):\n assert isinstance(result, str)\n result = json.loads(result)\n # ts4.dump_struct(result)\n self.data = result\n self.exit_code = result['exit_code']\n self.actions = result['out_actions']\n self.gas_used = result['gas']\n self.error = result['info']\n self.debot_answer_msg = result['debot_answer_msg']\n\ndef load_linker_lib():\n PACKAGE_DIR = os.path.basename(os.path.dirname(__file__))\n CORE = '.' 
+ sys.platform + '.linker_lib'\n\n try:\n core = importlib.import_module(CORE, PACKAGE_DIR)\n except ImportError as err:\n print('Error: {}'.format(err))\n exit()\n except:\n print('Unsupported platform:', sys.platform)\n exit()\n return core\n\ndef dispatch_message_ext(msg_id):\n result = globals.core.dispatch_message(msg_id)\n return ExecutionResult(result)\n\ndef call_contract_ext(addr, method, params, is_getter = False, is_debot = False, private_key = None):\n assert isinstance(addr, ts4.Address)\n assert isinstance(params, dict)\n result = globals.core.call_contract(\n addr.str(), method, is_getter, is_debot, ts4.json_dumps(params), private_key,\n )\n return ExecutionResult(result)\n\ndef call_ticktock_ext(addr, is_tock):\n result = globals.core.call_ticktock(addr.str(), is_tock)\n return ExecutionResult(result)\n\ndef deploy_contract_ext(contract, ctor_params, initial_data, pubkey, private_key, wc, override_address, balance):\n address = globals.core.deploy_contract(\n contract.tvc_path,\n contract.abi_path,\n ts4.json_dumps(ctor_params) if ctor_params is not None else None,\n ts4.json_dumps(initial_data) if initial_data is not None else None,\n pubkey,\n private_key,\n wc,\n override_address,\n balance,\n )\n return address\n\n# __core__ = load_linker_lib()\n","repo_name":"ever-guild/TestSuite","sub_path":"tonos_ts4/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
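# Editor's sketch: typical calls into this module from a TestSuite4 test script.
# Assumes the framework has initialised ts4/globals first; the address and the
# nano-token amount are made up.
enable_fees(True)   # include gas in balance checks
set_trace_level(0)  # silence core tracing
set_balance(ts4.Address('0:' + '0' * 64), 100_000_000_000)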
+{"seq_id":"74206848392","text":"import gym\nimport pathlib\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp\nimport argparse\nimport typing\nfrom copy import deepcopy\n\nfrom stable_baselines3.common.callbacks import ConvertCallback\nfrom stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize, VecVideoRecorder\nfrom stable_baselines3.common.monitor import Monitor\n\nfrom sac import SAC, SACPolicy\nfrom bsac import BSAC, BSACPolicy\nfrom env_cfg import env_cfg\nfrom sb_utils import MetricWrapper, RenderWrapper, EvalMetricCallback, create_noise\nimport utils\nimport plotting\n\n# Parameter defaults/allowed values:\nlog_dir = \"runs\"\nenvs = [\"Pendulum-v1\", \"LunarLanderContinuous-v2\"]\nbounds = [\"stabilize\", \"avoid\"]\nalgs = [\"sac\", \"bsac\"]\nrescale = [\"lin\", \"pwl\", \"hyp\", \"clip\"]\nseeds = [2345185777, 491764555, 1135283524, 2825301519, 4230406099,\n 1419159101, 2412356523, 3939077467, 4136346909, 2128146857]\nmodes = [\"train\", \"eval\", \"analyze\"]\neval_seed = 3687851522\neval_episodes = 10\neval_steps = 1000\nprocs = 4\n# All seed values above are randomly generated using:\n# >>> import numpy as np\n# >>> rng = np.random.default_rng()\n# >>> rng.integers(2**32, size=11)\n\nRENDER = False # Debug flag to enable rendering of the environment while training\n\n\nclass ObsMetricWrapper(MetricWrapper):\n AGGREGATIONS = {\"obs\": \"raw\"}\n\n def metrics(self, action, obs, reward, done):\n metrics = {\"obs\": obs}\n return metrics\n\n\nclass LunarMetricWrapper(MetricWrapper):\n AGGREGATIONS = {\"crash\": \"sum\", \"away\": \"sum\"}\n\n def metrics(self, action, obs, reward, done):\n metrics = {}\n if self.unwrapped.game_over:\n metrics[\"crash\"] = 1\n if abs(obs[0]) >= 1.0:\n metrics[\"away\"] = 1\n return metrics\n\n\nclass Run(object):\n\n def __init__(self, alg_id, env_id, seed, rescaling, bound, log_dir):\n self.alg_id = alg_id\n self.env_id = env_id\n self.seed = seed\n self.rescaling = rescaling\n self.bound = bound\n self.name = alg_id if alg_id == \"sac\" else f\"{alg_id}_{rescaling}\"\n self.root_dir = log_dir\n self.log_dir = pathlib.Path(log_dir, f\"{self.env_id}_{bound}\", self.name, f\"seed_{self.seed}\")\n self.alg = {\"sac\": SAC, \"bsac\": BSAC}[alg_id]\n self.model_path = self.log_dir / \"model.zip\"\n self.stats_path = self.log_dir / \"normalize_stats.pkl\"\n self.log_name = \"progress.csv\"\n self.video_path = self.log_dir / \"videos\"\n self.eval_path = self.log_dir / \"eval\"\n\n # Use tuned hyperparameters from stable-baselines ZOO:\n self.cfg = deepcopy(env_cfg[env_id][\"sac\"])\n if \"action_noise\" in self.cfg[\"kwargs\"]:\n env = self.create_base_env() # Create dummy environment to inspect action space\n self.cfg[\"kwargs\"][\"action_noise\"] = create_noise(self.cfg[\"kwargs\"][\"action_noise\"], env.action_space.shape)\n if alg_id == \"bsac\":\n self.cfg[\"kwargs\"][\"policy_kwargs\"][\"rescale\"] = rescaling\n self.cfg[\"kwargs\"][\"policy_kwargs\"][\"bounds\"] = self.cfg[\"bounds\"][bound]\n self.cfg[\"policy\"] = {\"sac\": SACPolicy, \"bsac\": BSACPolicy}[alg_id]\n\n self._model = None\n self._env = None\n\n @property\n def exists(self):\n return self.model_path.exists()\n\n def create_base_env(self, seed=None, render=False):\n env = Monitor(gym.make(self.env_id))\n if self.env_id == \"Pendulum-v1\":\n env = ObsMetricWrapper(env)\n elif self.env_id == \"LunarLanderContinuous-v2\":\n env = LunarMetricWrapper(env)\n if render:\n env = RenderWrapper(env)\n env.seed(self.seed if 
seed is None else seed)\n return env\n\n def create_vec_env(self, seed=None, render=False):\n return DummyVecEnv([lambda: self.create_base_env(seed, render)])\n\n def train(self):\n def on_eval(*args, **kwargs):\n self.save()\n eval_run = Run(self.alg_id, self.env_id, self.seed, self.rescaling, self.bound, self.root_dir)\n eval_run.eval(300, False, f\"eval{self._model.num_timesteps}\")\n self.analyze(f\"eval{self._model.num_timesteps}\")\n return True\n\n if not self.exists:\n # Automatically normalize the input features and reward\n self._env = VecNormalize(self.create_vec_env(render=RENDER), **self.cfg[\"norm_kwargs\"])\n eval_env = VecNormalize(self.create_vec_env(eval_seed), **self.cfg[\"norm_kwargs\"])\n # SB3 v1.5 bug workaround:\n self._env.obs_rms = eval_env.obs_rms = None\n eval_cb = EvalMetricCallback(eval_env, callback_after_eval=ConvertCallback(on_eval), n_eval_episodes=eval_episodes,\n eval_freq=5000, log_path=str(self.eval_path), best_model_save_path=str(self.eval_path))\n\n self._model = self.alg(self.cfg[\"policy\"], self._env, tensorboard_log=str(self.log_dir), verbose=1,\n seed=self.seed, **self.cfg[\"kwargs\"])\n self._model.learn(total_timesteps=self.cfg[\"total_timesteps\"], callback=eval_cb)\n self.save()\n\n def save(self):\n # Don't forget to save the VecNormalize statistics when saving the agent\n self._model.save(str(self.model_path))\n self._env.save(str(self.stats_path))\n\n def load(self):\n if self.exists:\n env = self.create_vec_env(eval_seed+1)\n # Uncomment to use best performing model instead of the last\n # self.model_path = self.eval_path / \"best_model\"\n # self.stats_path = self.eval_path / \"normalize_stats.pkl\"\n\n # Load the saved statistics\n self._env = VecNormalize.load(self.stats_path, env)\n # but do not update them at test time\n self._env.training = False\n\n # Load the agent\n self._model = self.alg.load(self.model_path, env=self._env)\n\n # Load the log\n log = pd.read_csv(str(self.log_dir / self.log_name), index_col=\"time/total_timesteps\")\n\n return self._env, self._model, log\n else:\n raise RuntimeError(f\"Run with parameters alg_id={self.alg_id}, env_id={self.env_id}, seed={self.seed} does\"\n f\"not exist yet. 
Call train first, before loading.\")\n\n def eval(self, length, video, prefix=\"eval\"):\n env, model, log = self.load()\n # Record the video starting at the first step\n if video:\n env = VecVideoRecorder(env, str(self.video_path), record_video_trigger=lambda x: x == 0,\n video_length=length, name_prefix=prefix)\n\n action_data = np.empty((6, length) + env.action_space.shape)\n obs = env.reset()\n for i in range(length):\n actions = model.predict(obs)[0]\n action_data[0, i, :] = actions[0, :]\n action_data[1:3, i, :] = model.policy._last_bounds if self.alg_id == \"bsac\" else [env.action_space.low, env.action_space.high]\n action_data[3, i, :] = model.policy._last_norm_action if self.alg_id == \"bsac\" else actions[0, :]\n action_data[4, i, :] = model.policy._last_mean\n action_data[5, i, :] = model.policy._last_std\n obs, _, _, _ = env.step(actions)\n # Save the video\n env.close()\n\n # Save action plots\n plot_actions(action_data[:3,:,:], [\"a\", \"l\", \"u\"], save_path=self.eval_path / f\"{prefix}-actions.svg\")\n plot_action_dists(action_data[4,:,:], action_data[5,:,:], save_path=self.eval_path / f\"{prefix}-action_dists.svg\")\n\n def fetch_analytics_data(self, uid):\n log = pd.read_csv(str(self.log_dir / self.log_name), index_col=\"time/total_timesteps\")\n r = log[\"eval/mean_reward\"].rename(f\"r_{uid}\").dropna()\n if self.env_id == \"Pendulum-v1\":\n obs = np.load(str(self.eval_path / \"obs.npy\"))\n return r, obs, eval_episodes\n elif self.env_id == \"LunarLanderContinuous-v2\":\n c = log[\"eval/crash/sum\"].rename(f\"c_{uid}\").dropna()\n a = log[\"eval/away/sum\"].rename(f\"a_{uid}\").dropna()\n return r, c, a, eval_episodes\n # Default: just return rewards\n return (r,)\n\n def analyze(self, prefix):\n data = self.fetch_analytics_data(prefix)\n data = [*zip(data)]\n self.do_analysis(data, self.env_id, self.eval_path, prefix)\n\n @staticmethod\n def do_analysis(data, env_id, save_path, prefix=\"\"):\n if env_id == \"Pendulum-v1\":\n pendulum_plots(data[1], data[2], save_path, prefix)\n elif env_id == \"LunarLanderContinuous-v2\":\n lunar_plots(data[1], data[2], data[3], save_path, prefix)\n\n\ndef plot_metrics(metrics, legends, xlabel='', ylabel='', title='', save_path=None, close=True):\n xs = [metric.index.values for metric in metrics]\n ys = [np.reshape(metric[\"mean\"].values, (-1, 1)) for metric in metrics]\n yms = [np.reshape(metric[\"min\"].values, (-1, 1)) for metric in metrics]\n yMs = [np.reshape(metric[\"max\"].values, (-1, 1)) for metric in metrics]\n alpha = 0.2\n return plotting.shaded_line_plot(xs, ys, yms, yMs, legends, alpha, xlabel, ylabel, title, save_path, close)\n\n\ndef plot_actions(actions, legends, title=None, save_path=None, close=True):\n max_comp = 4\n y_labels = [f\"Action {comp}\" for comp in range(min(max_comp, actions.shape[-1]))]\n return plotting.line_plots(actions[:,:,:max_comp], legends, \"timesteps\", y_labels, title, save_path, close)\n\n\ndef plot_action_dists(means, stds, save_path=None, close=True):\n return plotting.shaded_line_plot([np.arange(means.shape[0])], [means], [means-stds], [means+stds], save_path=save_path, close=close)\n\n\ndef pendulum_plots(obs, nb_evals, save_path, prefix=\"\"):\n if not (prefix == \"\" or prefix.endswith(\"-\")):\n prefix = prefix + \"-\"\n obs = np.concatenate(obs, axis=0)\n nb_evals = int(np.sum(nb_evals))\n\n max_vel = 8\n ang = np.arctan2(obs[:, 1], obs[:, 0])\n vel = np.minimum(np.abs(obs[:, 2]), max_vel) # Bound maximum velocity\n H, ang_edges, vel_edges = np.histogram2d(ang, vel, bins=[20, 
int(np.ceil(max_vel))],\n range=[[-np.pi, np.pi], [0, max_vel]])\n A, V = np.meshgrid(ang_edges, vel_edges)\n Hlog = np.log(H + 0.1) # Use log to be able to see initial visited states prior to stabilization\n f, ax = plt.subplots(subplot_kw={\"projection\": \"polar\"})\n ax.set_theta_zero_location(\"N\")\n ax.pcolormesh(A, V, Hlog.T)\n plotting._handle_figure(f, save_path / f\"{prefix}obs_hist.svg\", True)\n f, ax = plt.subplots(subplot_kw={\"projection\": \"polar\"})\n ax.set_theta_zero_location(\"N\")\n ax.set_ylim([0, max_vel])\n for i in range(nb_evals):\n ax.plot(ang[200 * i:200 * (i + 1)], vel[200 * i:200 * (i + 1)])\n plotting._handle_figure(f, save_path / f\"{prefix}obs.svg\", True)\n\n\ndef lunar_plots(crashes, aways, nb_evals, save_path, prefix):\n if not (prefix == \"\" or prefix.endswith(\"-\")):\n prefix = prefix + \"-\"\n crashes = sum(crashes)\n aways = sum(aways)\n nb_evals = int(sum(nb_evals))\n\n if len(crashes) > 1:\n ys = [100 * crashes / nb_evals, 100 * aways / nb_evals]\n f = plotting.stacked_fill_plot(crashes.index, ys, [\"Crash\", \"Astray\"], \"steps\", \"Episode end [%]\", close=False)\n f.axes[0].set_xlim([np.min(crashes.index), np.max(crashes.index)])\n f.axes[0].set_ylim([0, 100])\n plotting._handle_figure(f, save_path=save_path / f\"{prefix}endings.svg\", close=True) # f\"{prefix}endings.svg\"\n\n\ndef alg_generator(algs, rescalings):\n for alg_id in algs:\n if alg_id == \"bsac\":\n for rescaling in rescalings:\n yield alg_id, rescaling\n else:\n yield alg_id, None\n\n\ndef env_generator(envs, bounds):\n for env_id in envs:\n for bound in bounds:\n if bound in env_cfg[env_id][\"sac\"][\"bounds\"]:\n yield env_id, bound\n\n\ndef run(mode, alg_id, env_id, seed, rescaling, bound, log_dir, eval_length, video):\n import pybullet_envs\n load_torch()\n\n if not isinstance(alg_id, typing.List):\n run = Run(alg_id, env_id, seed, rescaling, bound, log_dir)\n if mode == \"train\":\n run.train()\n else:\n run.eval(eval_length, video)\n else:\n algs = alg_id\n rescalings = rescaling\n seeds = seed\n if mode == \"analyze\":\n # Bulk analytics mode\n returns = {}\n for alg_id, rescaling in alg_generator(algs, rescalings):\n data = []\n name = \"\"\n for seed in seeds:\n run = Run(alg_id, env_id, seed, rescaling, bound, log_dir)\n name = run.name\n # Gather analytics data:\n data.append(run.fetch_analytics_data(seed))\n data = [*zip(*data)]\n returns[name] = utils.process_metrics(data[0])\n Run.do_analysis(data, env_id, log_dir / f\"{env_id}_{bound}\" / \"charts\", name)\n\n # Create figures from return statistics:\n plot_metrics(returns.values(), returns.keys(), xlabel=\"steps\", ylabel=\"R\", title=\"Average return\",\n save_path=log_dir / f\"{env_id}_{bound}\" / \"charts\" / \"return.svg\")\n\n\ndef load_torch():\n import os\n os.environ['OPENBLAS_NUM_THREADS'] = '1' # Numpy import errors on some architectures without this\n import torch\n torch.set_num_threads(1) # Multiprocessing goes crazy slow without this\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\"State-dependent action bounds experiments\")\n # General args:\n parser.add_argument('--procs', default=procs, type=int, metavar='P', help=\"Number of processes to use for the experiments (default: %(default)d)\")\n parser.add_argument('--logdir', default=log_dir, type=str, metavar='PATH', help=\"Path to log directory, where experiment results and videos are stored (default: %(default)s)\")\n parser.add_argument('--mode', default=\"train\", type=str, metavar='MODE', help=\"Run mode: train, eval or 
analyze (default: %(default)s)\")\n # Run args:\n parser.add_argument('--envs', nargs='*', default=envs, choices=envs + [\"all\"], help=\"Environments to experiment on (default: 'all')\")\n parser.add_argument('--bounds', nargs='*', default=bounds, choices=bounds + [\"all\"], help=\"Action bounds to use in the chosen environment: stabilize or avoid (default: 'all')\")\n parser.add_argument('--algs', nargs='*', default=algs, choices=algs + [\"all\"], help=\"Algorithms to train with (default: 'all')\")\n parser.add_argument('--rescale', nargs='*', default=rescale, choices=rescale + [\"all\"], help=\"Rescaling (or clipping) function to use: lin, pwl, hyp or clip (default: 'all')\")\n parser.add_argument('--seeds', nargs='*', default=None, type=int, metavar='S', help=\"Seeds to use, -N to select N random seeds (default: 10 seeds used in paper)\")\n # Evaluation args:\n parser.add_argument('--evalsteps', default=eval_steps, type=int, metavar='S', help=\"Amount of simulated timesteps during evaluation (default: %(default)d)\")\n parser.add_argument('--novideo', action=\"store_false\", dest=\"video\", help=\"Do not create videos of the evaluations\")\n\n params = parser.parse_args(args)\n if \"all\" in params.envs:\n params.envs = envs\n if \"all\" in params.bounds:\n params.bounds = bounds\n if \"all\" in params.algs:\n params.algs = algs\n if \"all\" in params.rescale:\n params.rescale = rescale\n if params.seeds is None:\n params.seeds = seeds\n elif len(params.seeds) == 1 and params.seeds[0] < 0:\n N = -params.seeds[0]\n rng = np.random.default_rng()\n params.seeds = rng.integers(2**32, size=N).tolist()\n params.logdir = pathlib.Path(params.logdir)\n\n return params\n\n\nif __name__ == \"__main__\":\n def run_args_gen(params):\n if params.mode == \"analyze\":\n for env_id, bound in env_generator(params.envs, params.bounds):\n yield params.mode, params.algs, env_id, params.seeds, params.rescale, bound, params.logdir, params.evalsteps, params.video\n else:\n for alg_id, rescaling in alg_generator(params.algs, params.rescale):\n for env_id, bound in env_generator(params.envs, params.bounds):\n for seed in params.seeds:\n yield params.mode, alg_id, env_id, seed, rescaling, bound, params.logdir, params.evalsteps, params.video\n\n params = parse_args()\n args_gen = run_args_gen(params)\n if params.procs > 1:\n load_torch()\n mp.set_start_method('spawn') # This will make sure the different workers can use different seeds\n with mp.Pool(params.procs) as pool:\n pool.starmap(run, args_gen)\n else:\n for args in args_gen:\n run(*args)\n","repo_name":"dcbr/sdab","sub_path":"action_bounds.py","file_name":"action_bounds.py","file_ext":"py","file_size_in_byte":16867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
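# Editor's sketch: typical invocations implied by parse_args() above (paths and
# seed counts are illustrative):
#   python action_bounds.py --mode train --algs bsac --rescale hyp \
#       --envs Pendulum-v1 --bounds stabilize --seeds -3 --procs 2
#   python action_bounds.py --mode analyze --envs all --bounds all --logdir runs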
+{"seq_id":"71881506952","text":"#!/usr/bin/env python3\n\nimport sys\nfrom typing import TextIO\nfrom copy import deepcopy\n\n\ndef parse_stacks(f: TextIO) -> list[list[str]]:\n tmp = \"\"\n n = 0\n for line in f:\n if \"1\" in line:\n n = int(line.strip().split(\" \")[-1])\n break\n else:\n tmp += line\n\n stacks: list[list[str]] = [[] for _ in range(n)]\n\n for line in tmp.split(\"\\n\"):\n i = 0\n while i * 4 < len(line):\n crate = line[i * 4 : (i * 4) + 3]\n if crate != \" \":\n stacks[i].append(crate[1])\n i += 1\n\n return [stack[::-1] for stack in stacks]\n\n\ndef parse_moves(f: TextIO) -> list[tuple[int, int, int]]:\n moves: list[tuple[int, int, int]] = []\n\n for line in f:\n if line == \"\\n\":\n continue\n\n parts = line.strip().split(\" \")\n moves.append((int(parts[1]), int(parts[3]) - 1, int(parts[5]) - 1))\n\n return moves\n\n\ndef load_input(path: str) -> tuple[list[list[str]], list[tuple[int, int, int]]]:\n with open(path, \"r\") as f:\n stacks = parse_stacks(f)\n moves = parse_moves(f)\n\n return stacks, moves\n\n\ndef do_moves(\n stacks: list[list[str]], moves: list[tuple[int, int, int]]\n) -> tuple[str, str]:\n cargo_update = deepcopy(stacks)\n\n for n, from_stack, to_stack in moves:\n stacks[to_stack] += stacks[from_stack][: -(n + 1) : -1]\n stacks[from_stack] = stacks[from_stack][:-n]\n\n cargo_update[to_stack] += cargo_update[from_stack][-n:]\n cargo_update[from_stack] = cargo_update[from_stack][:-n]\n\n return \"\".join([stack[-1] for stack in stacks]), \"\".join(\n [stack[-1] for stack in cargo_update]\n )\n\n\nif __name__ == \"__main__\":\n stacks, moves = load_input(sys.argv[1])\n part_1, part_2 = do_moves(stacks, moves)\n print(part_1)\n print(part_2)\n","repo_name":"CryptoCopter/AdventOfCode2022","sub_path":"05/cargo_crate_install.py","file_name":"cargo_crate_install.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"209344407","text":"import numpy as np\nimport os\nimport coco\nimport model as modellib\nimport glob\n\nimport imageio\nimport cv2\n\n# Root directory to project\nROOT_DIR = os.getcwd()\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Path to trained weights file\n# Download this file and place in the root of your \n# project (See README file for details)\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\nclass InferenceConfig(coco.CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\nconfig = InferenceConfig()\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\nnumFiles = len(glob.glob('extractGif/*.jpg'))\ncounter = 0\n\nfor i in range(1, numFiles):\n filename = 'extractGif/%05d.jpg' % i\n print(\"doing frame %s\" % filename)\n frame = cv2.imread(filename)\n \n results = model.detect([frame], verbose=0)\n r = results[0]\n masky = np.zeros((frame.shape[0], frame.shape[1]), dtype='uint8')\n humans = []\n if r['rois'].shape[0] >= 1:\n for b in range(r['rois'].shape[0]):\n if r['class_ids'][b] == class_names.index('person'):\n masky += r['masks'][:,:,b] * 255\n humansM = r['masks'][:,:,b] * 255\n y1, x1, y2, x2 = r['rois'][b]\n humansCut = frame[y1:y2, x1:x2]\n humansCut = cv2.cvtColor(humansCut.astype(np.uint8), cv2.COLOR_BGR2RGBA)\n humansCut[:,:,3] = humansM[y1:y2, x1:x2]\n humans.append(humansCut)\n\n if len(humans) >= 1:\n counter += 1\n\n for j, human in enumerate(humans):\n fileout = 'giffer%i/%05d.png' % (j, counter)\n if not os.path.exists('giffer%i' % j):\n os.makedirs('giffer%i' % j)\n print(fileout)\n #frame = cv2.cvtColor(frame.astype('uint8'), cv2.COLOR_BGRA2BGR)\n imageio.imwrite(fileout, human)\n","repo_name":"burningion/daily-sketches","sub_path":"016/gif_extract.py","file_name":"gif_extract.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"27"}
+{"seq_id":"30288585546","text":"import json\nimport multiprocessing.shared_memory as shared_memory\nimport os\n\nimport cv2\nimport numpy as np\nfrom flasgger import Swagger\nfrom flask import Flask, Response, render_template, request\n\nfrom links.mq_req_rep.link import MessageQueueReqRep\n\napp = Flask(__name__)\nmq = MessageQueueReqRep()\nshared_frame = None\nweb_netron_port = None\nSwagger(app)\n\n# Set WeDX settings\nsettings = None\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.abspath(os.path.join(current_path, \"../.wedx/settings.json\"))) as fp:\n settings = json.load(fp)\n\n\n@app.route(\"/\", methods=[\"GET\"])\nasync def index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/netron\", methods=[\"GET\"])\nasync def netron():\n iframe = \"http://\" + request.host.split(\":\")[0] + \":\" + str(web_netron_port)\n return render_template(\"netron.html\", iframe=iframe)\n\n\n@app.route(\"/apis\", methods=[\"GET\"])\nasync def apis():\n iframe = (\n \"http://\"\n + request.host.split(\":\")[0]\n + \":\"\n + str(settings[\"webapi_port\"])\n + \"/apidocs\"\n )\n return render_template(\"apis.html\", iframe=iframe)\n\n\n@app.route(\"/startpipeline\", methods=[\"POST\"])\nasync def start_pipeline():\n \"\"\"Start pipeline\n ---\n tags:\n - WeDX Web API List\n responses:\n 200:\n description: OK\n \"\"\"\n message = await mq.client(message={\"method\": \"start_pipeline\"})\n return \"Call Start Pipeline method : \" + message\n\n\n@app.route(\"/stoppipeline\", methods=[\"POST\"])\nasync def stop_pipeline():\n \"\"\"Stop pipeline\n ---\n tags:\n - WeDX Web API List\n responses:\n 200:\n description: OK\n \"\"\"\n message = await mq.client(message={\"method\": \"stop_pipeline\"})\n return \"Call Stop Pipeline method : \" + message\n\n\n@app.route(\"/importpipeline\", methods=[\"POST\"])\nasync def import_pipeline():\n \"\"\"Import pipeline\n ---\n tags:\n - WeDX Web API List\n parameters:\n -\n name: body\n in: body\n required: true\n type: string\n responses:\n 200:\n description: OK\n \"\"\"\n data = request.data.decode(\"utf-8\")\n payload = json.loads(data)\n message = await mq.client(message={\"method\": \"import_pipeline\", \"payload\": payload})\n return \"Call Import Pipeline method : \" + message\n\n\n@app.route(\"/exportpipeline\", methods=[\"POST\"])\nasync def export_pipeline():\n \"\"\"Export pipeline\n ---\n tags:\n - WeDX Web API List\n responses:\n 200:\n description: OK\n \"\"\"\n message = await mq.client(message={\"method\": \"export_pipeline\"})\n return message\n\n\n@app.route(\"/stream\", methods=[\"GET\"])\nasync def stream():\n return render_template(\"stream.html\")\n\n\ndef gen():\n while True:\n _, frame = cv2.imencode(\".jpg\", shared_frame)\n if frame is not None:\n yield (\n b\"--frame\\r\\n\"\n b\"Content-Type: image/jpeg\\r\\n\\r\\n\" + frame.tobytes() + b\"\\r\\n\"\n )\n\n\n@app.route(\"/video_feed\", methods=[\"GET\"])\nasync def video_feed():\n return Response(gen(), mimetype=\"multipart/x-mixed-replace; boundary=frame\")\n\n\ndef run_api(width, height, netron_port, **kwargs):\n global shared_frame\n global web_netron_port\n existing_shm = shared_memory.SharedMemory(name=\"wedx_shm\")\n shared_frame = np.ndarray(\n (height, width, 3), dtype=np.uint8, buffer=existing_shm.buf\n )\n web_netron_port = netron_port\n 
app.run(**kwargs)\n","repo_name":"motojinc25/WeDX","sub_path":"src/servers/webapi.py","file_name":"webapi.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"27"}
+{"seq_id":"40395617564","text":"from console import Console\n\nclass HookAlwaysUp(Console):\n active = False\n def is_up(self, initial = False):\n if(not self.active and not initial):\n self.active = True\n return True\n else:\n return not self.active","repo_name":"koenschepens/service.kodi.voicecontrol","sub_path":"henk/flow/engines/input/hook_always_up.py","file_name":"hook_always_up.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"24860832597","text":"from flask import Flask, Blueprint,request,jsonify,render_template,redirect\nfrom auth import tokenCheck,verificar\nfrom app import db,bcrypt\nfrom models import Perfil, Cuenta,Mod,Admin, Noticia\nfrom sqlalchemy import exc \nfrom utils import encode_auth_token, decode_auth_token\n\n\napp = Flask(__name__)\n\nappmain = Blueprint('main', __name__, template_folder='templates', static_folder='static', static_url_path='/main/static')\n\n@appmain.route('/', methods=['GET','POST'])\ndef index():\n if request.method == \"GET\":\n image_urls = [\n \"https://media.istockphoto.com/id/514472018/es/foto/hermosa-boda-par-abrazar-cerca-de-columnas.jpg?s=612x612&w=0&k=20&c=uNaI5B3WpsnZVff6l6jvyhZMXd-DIkQl7ZCjO2W92K0=\",\n \"https://media.istockphoto.com/id/498477061/es/foto/siente-el-d%C3%ADa-de-su-boda-bliss.jpg?s=612x612&w=0&k=20&c=kYex3_JmNNEz6CdYfclcwKjxj2GGln-NU9QyXKdlb5Y=\",\n \"https://media.istockphoto.com/id/530188882/es/foto/retrato-de-una-joven-pareja-de-novios.jpg?s=612x612&w=0&k=20&c=SeUr9mYN6ZJ1phUPg05577uSMtO8-u4bSgcda-DfAus=\"\n ]\n noticias = Noticia.query.all()\n info_list = [\n {\n 'image_url': noticia.url if noticia.url \n else 'https://upload.wikimedia.org/wikipedia/commons/a/a3/Image-not-found.png',\n 'text1': noticia.text1,\n 'text2': noticia.text2\n }\n for noticia in noticias\n ]\n \n return render_template('main.html', image_urls=image_urls, info_list=info_list)\n else:\n try:\n token = request.json['cuenta_id']\n\n # Decodificar el token para obtener la información\n decoded_token = decode_auth_token(token)\n\n # Acceder al valor del campo \"sub\"\n if 'sub' in decoded_token:\n sub_value = decoded_token['sub']\n cuenta_id = int(sub_value)\n else:\n raise ValueError('El campo \"sub\" no está presente en el token')\n\n except ValueError as e:\n return jsonify({'status': 'error', 'message': f'Error al desencriptar el token: {str(e)}'})\n\n if cuenta_id:\n # Consultar admin\n admin = Admin.query.filter_by(cuenta_id=cuenta_id).first()\n cuentax = Cuenta.query.filter_by(id_cuenta = cuenta_id).first()\n if admin:\n return jsonify({\n 'status': 'success',\n 'rol': 'administrador',\n 'email': cuentax.email # Obtén el email desde la cuenta asociada al admin\n })\n\n # Consultar mod\n mod = Mod.query.filter_by(cuenta_id=cuenta_id).first()\n if mod:\n return jsonify({\n 'status': 'success',\n 'rol': 'moderador',\n 'email': mod.cuenta.email # Obtén el email desde la cuenta asociada al mod\n })\n \n # Consultar el perfil\n cuenta = Cuenta.query.filter_by(id_cuenta = cuenta_id).first()\n if cuenta:\n perfil = Perfil.query.filter_by(cuenta_id = cuenta.id_cuenta).first()\n \n if perfil:\n return jsonify({\n 'status': 'success',\n 'rol': 'casanova',\n 'email': perfil.usuario\n })\n cuenta = Cuenta.query.filter_by(id_cuenta = cuenta_id).first()\n if cuenta:\n nombre_componentes = [\n cuenta.primer_nombre,\n cuenta.otros_nombres,\n cuenta.primer_apellido,\n cuenta.segundo_apellido,\n ]\n\n # Filtrar los componentes que no son None\n nombre_no_none = [componente for componente in nombre_componentes if componente is not None]\n\n # Unir los componentes con un espacio en blanco\n nombre_completo = \" \".join(nombre_no_none)\n return jsonify({\n 'status': 'success',\n 'rol': 'pendiente',\n 'email': nombre_completo\n })\n return jsonify({'status': 'error', 'message': 'No se encontro el rol'})\n\n\ndef verificar_credenciales(principal, contra):\n # Buscar el usuario por correo electrónico en la tabla Cuenta\n cuenta = 
Cuenta.query.filter_by(email=principal.lower().strip()).first()\n\n if cuenta and bcrypt.check_password_hash(cuenta.password, contra):\n return True, cuenta\n else:\n # Si no se encontró en la tabla Cuenta, buscar en la tabla Perfil por usuario\n perfil = Perfil.query.filter_by(usuario=principal).first()\n\n if perfil and perfil.cuenta_id is not None:\n # Si se encuentra el perfil y tiene un cuenta_id válido, obtener la cuenta\n cuenta = Cuenta.query.filter_by(id_cuenta=perfil.cuenta_id).first()\n if cuenta and bcrypt.check_password_hash(cuenta.password, contra):\n return True, cuenta\n\n return False, None\n\n\n\n@appmain.route('/login',methods=[\"GET\",\"POST\"])\ndef login_post():\n if(request.method==\"GET\"):\n token = request.args.get('cuenta_id')\n if token:\n info = verificar(token)\n if(info['status']!=\"fail\"):\n responseObject={\n 'status':\"success\",\n 'message':'valid token',\n 'info':info\n }\n return jsonify(responseObject)\n return render_template('login.html')\n else:\n try:\n principal = request.json.get('principal')\n contra = request.json.get('contra')\n\n if not principal or not contra:\n responseObject = {\n 'status' : 'fail',\n 'message': 'Credenciales incompletas'\n }\n return jsonify(responseObject), 403\n\n success, cuenta = verificar_credenciales(principal, contra)\n\n if success:\n _id = cuenta.id_cuenta\n auth_token = cuenta.encode_auth_token(user_id=_id)\n responseObject = {\n 'status': 'success',\n 'login': 'Inicio de sesión exitoso',\n 'auth_token': auth_token\n }\n return jsonify(responseObject), 203\n else:\n # Usuario no encontrado o contraseña incorrecta\n responseObject = {\n 'status' : 'fail',\n 'message': 'Credenciales incorrectas'\n }\n return jsonify(responseObject), 403\n except Exception as e:\n return jsonify({'message': str(e)}), 503\n\n@appmain.route('/register')\ndef registro():\n return render_template('registro.html')\n\n@appmain.route('/cuenta',methods=[\"GET\",\"POST\"])\ndef registro_post():\n if request.method==\"GET\":\n return render_template('register.html')\n else:\n primer_nombre = request.json['pnombre']\n otro_nombre = request.json['snombre'] #opcional\n primer_apellido = request.json['papellido']\n segundo_apellido = request.json['sapellido'] #opcional\n fecha = request.json['fnacimiento']\n email=request.json['correo'].lower().strip()\n telefono = request.json['Telef'] #opcional\n password=request.json['password']\n usuario = Cuenta(primer_nombre=primer_nombre,otros_nombres=otro_nombre, primer_apellido=primer_apellido, segundo_apellido= segundo_apellido, fecha_nacimiento=fecha, telefono= telefono, email=email, password=password)\n userExists = Cuenta.query.filter_by(email=email).first()\n\n # Verificar si hay valores None y cambiarlos a cadena vacía\n otro_nombre = otro_nombre if otro_nombre is not None else ''\n segundo_apellido = segundo_apellido if segundo_apellido is not None else ''\n telefono = telefono if telefono is not None else ''\n\n if not userExists:\n try:\n db.session.add(usuario)\n db.session.commit()\n auth_token = usuario.encode_auth_token(user_id=usuario.id_cuenta)\n responseObject={\n 'status':'success',\n 'message':\"Registro exitoso\",\n 'cuenta_id': auth_token\n }\n except exc.SQLAlchemyError as e:\n responseObject={\n 'status':'error',\n 'message':e\n }\n else:\n responseObject={\n 'status':'error',\n 'message':'usuario existente con ese correo'\n }\n return jsonify(responseObject)\n \n@appmain.route('/terminos-y-condiciones',methods=[\"GET\",\"POST\"])\ndef terminos():\n return 
render_template('terminos-y-condiciones.html')\n\n@appmain.route('/procedimientos',methods=[\"GET\",\"POST\"])\ndef proceso():\n return render_template('procedimientos.html')\n\n@appmain.errorhandler(404)\ndef not_found_error(error):\n return render_template('404.html'), 404\n\n# Manejar el error 500 (error interno del servidor)\n@appmain.errorhandler(500)\ndef internal_error(error):\n return render_template('500.html'), 500","repo_name":"JonnyJaccob/MultiParadigTeam9","sub_path":"ProyectoFinal/routes/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"12842090299","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef load_data():\n\tdf = pd.read_csv('Iris.data')\n\treturn df\n\ndef segregate_data(df):\n\tfeatures = df.iloc[:,[0,1,2,3]]\n\ttextual_label = df.iloc[:,[4]]\n\tle = preprocessing.LabelEncoder()\n\t#0: Iris-setosa 1: Iris-versicolor 2: Iris-virginica\n\tlabel = le.fit_transform(np.ravel(textual_label))\n\treturn (features, label)\n\ndef create_test_and_train_set(features, label, test_size = 0.33):\n\tfeatures_train, features_test, label_train, label_test = train_test_split(features, label, test_size = test_size, random_state=0, stratify = label)\n\texecute_classifier(label_test, label_train, features_test, features_train, test_size)\n\ndef execute_classifier(label_test, label_train, features_test, features_train, test_size):\n\tclf = RandomForestClassifier(n_estimators = 10)\n\tclf.fit(features_train, np.ravel(label_train))\n\tpredict_sample(clf)\n\tprint(clf.predict_proba(features_test)[0:10])\n\tprint(\"Accuracy: \"+repr(round(clf.score(features_test, label_test) * 100, 2)) + \"% Test size: \"+repr(round(test_size * 100, 2))+\"%\")\n\ndef predict_sample(rf):\n\tprediction = rf.predict([[1.0,3.2,31,0]])\n\tprediction_post_string = \"setosa\" if prediction == 0 else \"versicolor\" if prediction == 1 else \"virginica\"\n\tprint(\"Iris-\"+prediction_post_string)\n\n\nfeatures, label = segregate_data(load_data())\nnp.random.seed(0)\ncreate_test_and_train_set(features, label)","repo_name":"sumitmukhija/Iris","sub_path":"Iris-by-Random-Forest-SK.py","file_name":"Iris-by-Random-Forest-SK.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"6766547680","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom RestApp.forms import ReForm,ItemsForm,UsgForm,Rltype,Rlupd,Pfupd,Chgepwd\nfrom RestApp.models import Restaurant,Itemlist,Rolereq,User\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import send_mail\nfrom Restaurant import settings\n\n# Create your views here.\ndef home(request):\n\tw = Restaurant.objects.filter(uid_id=request.user.id)\n\tt = Restaurant.objects.all()\n\treturn render(request,'app/home.html',{'c':w,'y':t})\n\ndef about(request):\n\treturn render(request,'app/about.html')\n\ndef contact(request):\n\treturn render(request,'app/contact.html')\n\n#def login(request):\n#\treturn render(request,'app/login.html')\n@login_required\ndef restlist(request):\n\ty = Restaurant.objects.filter(uid_id=request.user.id)\n\tif request.method == \"POST\":\n\t\tt = ReForm(request.POST,request.FILES)\n\t\tif t.is_valid():\n\t\t\tc = t.save(commit=False)\n\t\t\tc.uid_id = request.user.id\n\t\t\tc.save()\n\t\t\tmessages.success(request,\"Restaurant Added Successfully\")\n\t\t\treturn redirect('/rlist')\n\tt = ReForm()\n\treturn render(request,'app/restaurantlist.html',{'q':t,'a':y})\t\n\n@login_required\ndef rstup(request,m):\n\tk = Restaurant.objects.get(id=m)\n\tif request.method == \"POST\":\n\t\te = ReForm(request.POST,request.FILES,instance=k)\n\t\tif e.is_valid():\n\t\t\te.save()\n\t\t\tmessages.warning(request,\"{} Restaurant Updated Successfully\".format(k.Rname))\n\t\t\treturn redirect('/rlist')\n\te = ReForm()\t\n\treturn render(request,'app/restupdate.html',{'x':e})\n\ndef rstdl(request,n):\n\ts = Restaurant.objects.get(id=n)\n\tif request.method == \"POST\":\n\t\tmessages.info(request,\"{} Restaurant Deleted Successfully\".format(s.Rname))\n\t\ts.delete()\n\t\treturn redirect('/rlist')\t\n\treturn render(request,'app/restdelete.html',{'y':s})\n\ndef rstvw(request,a):\n\tv = Restaurant.objects.get(id=a)\n\treturn render(request,'app/restview.html',{'z':v})\n\n\ndef itlist(request):\n\tst = list(Restaurant.objects.filter(uid_id=request.user.id))\n\tprint(st,type(st))\n\tmm = Itemlist.objects.all()\n\td,i = {},0\n\tfor mp in mm:\n\t\tfor h in st:\n\t\t\tif mp.rsid_id == h.id:\n\t\t\t\td[i] = mp.iname,mp.icategory,mp.price,mp.iimage,mp.itavailability,mp.id,h.Rname\n\t\t\t\ti = i+1\n\tif request.method == \"POST\":\n\t\tk = ItemsForm(request.POST,request.FILES)\n\t\tif k.is_valid():\n\t\t\tn = k.save(commit=False)\n\t\t\tmessages.success(request,'{} Item is Added Successfully'.format(n.iname))\n\t\t\tn.save()\n\t\t\treturn redirect('/ilist')\n\tk = ItemsForm()\t\t\n\treturn render(request,'app/itmlist.html',{'r':k,'er':st,'s':d.values()})\n\ndef usrreg(request):\n\tif request.method == \"POST\":\n\t\td = UsgForm(request.POST)\n\t\tif d.is_valid():\n\t\t\td.save()\n\t\t\treturn redirect('/login')\n\td = UsgForm()\t\t\n\treturn render(request,'app/usrregister.html',{'t':d})\n\ndef itup(request,s):\n\tk = Itemlist.objects.get(id=s)\n\tif request.method == \"POST\":\n\t\te = ItemsForm(request.POST,request.FILES,instance=k)\n\t\tif e.is_valid():\n\t\t\te.save()\n\t\t\tmessages.warning(request,\"{} Itemlist Updated Successfully\".format(k.iname))\n\t\t\treturn redirect('/ilist')\n\te = ItemsForm()\t\n\treturn render(request,'app/itemupdate.html',{'x':e})\n\ndef itdl(request,p):\n\ts = Itemlist.objects.get(id=p)\n\tif request.method == \"POST\":\n\t\tmessages.info(request,\"{} Itemlist Deleted 
Successfully\".format(s.iname))\n\t\ts.delete()\n\t\treturn redirect('/rlist')\t\n\treturn render(request,'app/itemdelete.html',{'y':s})\n\ndef itvw(request,b):\n\tv = Itemlist.objects.get(id=b)\n\treturn render(request,'app/itemview.html',{'z':v})\n\n\n@login_required\ndef rolereq(request):\n\tp = Rolereq.objects.filter(ud_id=request.user.id).count()\n\tif request.method == \"POST\":\n\t\tk = Rltype(request.POST,request.FILES)\n\t\tif k.is_valid():\n\t\t\tprint(k)\n\t\t\ty = k.save(commit=False)\n\t\t\ty.ud_id = request.user.id\n\t\t\ty.uname = request.user.username\t\n\t\t\ty.save()\n\t\t\treturn redirect('/')\n\tk = Rltype()\n\treturn render(request,'app/rolereq.html',{'d':k,'c':p})\n\n@login_required\ndef gveperm(request):\n\tu = User.objects.all()\n\tr = Rolereq.objects.all()\n\td = {}\n\tfor n in u:\n\t\tfor m in r:\n\t\t\tif n.is_superuser == 1 or n.id != m.ud_id:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\td[m.id] = m.uname,m.rltype,n.role,m.id\t\t\t\n\treturn render(request,'app/gvper.html',{'h':d.values()})\n\n@login_required\ndef gvupd(request,t):\n\ty = Rolereq.objects.get(id=t)\n\td = User.objects.get(id=y.ud_id)\n\tif request.method == \"POST\":\n\t\tn = Rlupd(request.POST,instance=d)\n\t\tif n.is_valid():\n\t\t n.save()\n\t\t y.is_checked = 1\n\t\t y.save()\n\t\t return redirect('/gvper')\n\tn = Rlupd(instance=d)\n\treturn render(request,'app/gvpermssion.html',{'c':n})\n\n@login_required\ndef pfle(request):\n\treturn render(request,'app/profile.html')\n\n@login_required\ndef feedback(request):\n\tif request.method == 'POST':\n\t\tsd = request.POST['snmail']\n\t\tsm = request.POST['sub']\n\t\tmg = request.POST['msg'] \n\t\trt = settings.EMAIL_HOST_USER\n\t\tdt = send_mail(sm,mg,rt,[sd])\n\t\tif dt == 1:\n\t\t\treturn redirect('/')\n\treturn render(request,'app/feedback.html')\n\n@login_required\ndef pfleupd(request):\n\tt = User.objects.get(id=request.user.id)\n\tif request.method == \"POST\":\n\t\tpfl = Pfupd(request.POST,request.FILES,instance=t)\n\t\tif pfl.is_valid():\n\t\t\tpfl.save()\n\t\t\treturn redirect('/pfle')\n\tpfle = Pfupd(instance=t)\n\treturn render(request,'app/pfleupdate.html',{'u':pfle})\t\n\n@login_required\ndef changepwd(request):\n\tif request.method == \"POST\":\n\t\tk = Chgepwd(user=request.user,data=request.POST)\n\t\tif k.is_valid():\n\t\t\tk.save()\n\t\t\treturn redirect('/login')\n\tk = Chgepwd(user=request)\n\treturn render(request,'app/changepwd.html',{'t':k})","repo_name":"salma1819/GitBasics","sub_path":"daily work/RestApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"71778953674","text":"import time\n\n\nclass MyTimer:\n def __init__(self):\n self.unit = ['年', '月', '天', '小时', '分钟', '秒']\n self.prompt = '未开始定时器'\n self.lasted = []\n self.begin = 0\n self.end = 0\n\n def __str__(self):\n return self.prompt\n\n __repr__ = __str__\n\n def __add__(self, other):\n prompt = 'all总共运行了'\n result = []\n for index in range(6):\n result.append(self.lasted[index] + other.lasted[index])\n if result[index]:\n prompt += (str(result[index]) + self.unit[index])\n return prompt\n\n def start(self):\n self.begin = time.localtime()\n self.prompt = '请先停止计时'\n print('开始计时...')\n\n def stop(self):\n if not self.begin:\n print('请先开始计时!')\n return\n self.end = time.localtime()\n self._calc()\n self.begin = 0\n self.end = 0\n print('计时结束...')\n\n def _calc(self):\n self.lasted = []\n self.prompt = '总共运行了'\n for index in range(6):\n self.lasted.append(self.end[index] - self.begin[index])\n if (self.lasted[index]):\n self.prompt += (str(self.lasted[index]) + self.unit[index])\n\n print(self.prompt)\n\n\nt1 = MyTimer()\nt1\nt1.start()\ntime.sleep(5.0)\nt1.stop()\nprint(t1)\n\n\n\nt2 = MyTimer()\nt2\nt2.start()\ntime.sleep(3.0)\nt2.stop()\nprint(t2)\n\n\nprint(t1 + t2)","repo_name":"gee1k/LeanPython3","sub_path":"002/mytimer.py","file_name":"mytimer.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"155676311","text":"import cv2\nimport numpy as np \nfrom matplotlib import pyplot as plt \nimport time\n\nimg = cv2.imread(\"messi6.jpg\")\nimg2gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# B,G,R = cv2.split(img)\n# temp = cv2.merge([R,G,B])\n\nfilt = cv2.GaussianBlur(img,(5,5),0)\n\ndst = cv2.Canny(filt,100,200)\n# while True:\n# # time.sleep(2)\n# cv2.imshow('gray',img2gray)\n# time.sleep(2)\n# break\nplt.subplot(131),plt.imshow(img2gray),plt.title(\"Original(gray)\")\nplt.xticks([]),plt.yticks([])\nplt.subplot(132),plt.imshow(filt),plt.title(\"Gauss——filter\")\nplt.xticks([]),plt.yticks([])\nplt.subplot(133),plt.imshow(dst),plt.title(\"Canny\")\nplt.xticks([]),plt.yticks([])\nplt.show()\n\ncv2.bitwise_and(img,roi,)\n# plt.subplot(131),plt.imshow(temp),plt.title('Original')\n# plt.xticks([]),plt.yticks([])","repo_name":"Tang930817/cv_std","sub_path":"07canny.py","file_name":"07canny.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"30761958224","text":"# https://leetcode.com/problems/compare-version-numbers/\n''' \nclass Solution:\n def compareVersion(self, v1: str, v2: str) -> int:\n i = 0\n j = 0\n while i < len(v1) or j < len(v2):\n n1 = 0\n k = i\n if k < len(v1):\n while k < len(v1) and v1[k] != '.':\n k += 1\n n1 = int(v1[i:k])\n i = k + 1\n \n n2 = 0\n k = j \n if k < len(v2):\n while k < len(v2) and v2[k] != '.':\n k += 1\n n2 = int(v2[j:k])\n j = k + 1\n \n if n1 < n2: return -1\n if n1 > n2: return 1\n \n return 0\n\n# Time: O(N^2)\n# Space: O(1)\n'''\n\nclass Solution:\n def compareVersion(self, version1: str, version2: str) -> int:\n v1 = collections.deque(version1.split(\".\"))\n v2 = collections.deque(version2.split(\".\"))\n \n while v1 or v2:\n v1_val = 0\n v2_val = 0\n if v1: v1_val = int(v1.popleft())\n if v2: v2_val = int(v2.popleft())\n \n if v1_val > v2_val: return 1\n if v1_val < v2_val: return -1\n \n return 0\n \n# Time: O(N) ; as pop from deque is constant time\n# Space: O(N) ; for making v1 and v2\n\n\n","repo_name":"SamirPaulb/DSAlgo","sub_path":"30-Days-SDE-Sheet-Practice/16. Day 16 String Part-II/Compare Version Numbers.py","file_name":"Compare Version Numbers.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":1690,"dataset":"github-code","pt":"28"}
+{"seq_id":"35657551257","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport attrs\nimport quarrel\n\nfrom .. import consts, enums, strings, utils\n\n__all__ = (\"Target\",)\n\nif TYPE_CHECKING:\n from .. import flags, models\n\n\n@attrs.define(weakref_slot=False, auto_attribs=True, kw_only=True, eq=False)\nclass Target:\n nation: models.Nation\n rating: float\n attributes: tuple[TargetAttribute]\n\n def build_embed_field(self) -> quarrel.EmbedField:\n return utils.embed_field(\n strings.target_field_name(self.nation, self.rating),\n strings.target_field_value(self),\n )\n\n @classmethod\n def rate_target(\n cls,\n count: flags.TargetFindCounting,\n rater: models.TargetRater,\n attacker: models.Nation,\n defender: models.Nation,\n ) -> Target:\n attributes: list[TargetAttribute] = []\n for attr in attrs.fields(type(rater)):\n if getattr(count, attr.name, None):\n attr_rater = rater.get_rater(attr.name)\n if attr_rater:\n attributes.append(\n TargetAttribute(\n name=attr.name,\n value=TargetAttribute.get_value(attr.name, defender),\n rating=float(\n utils.evaluate_in_default_scope(\n attr_rater,\n nation=attacker,\n target=defender,\n )\n ),\n )\n )\n return cls(\n nation=defender,\n rating=sum(attr.rating for attr in attributes),\n attributes=tuple(attributes),\n )\n\n\n@attrs.define(weakref_slot=False, auto_attribs=True, kw_only=True, eq=False)\nclass TargetAttribute:\n name: str\n value: str\n rating: float\n\n @classmethod\n def get_value(cls, attr: str, nation: models.Nation) -> str:\n if attr in {\"soldiers\", \"tanks\", \"aircraft\", \"ships\"}:\n value = getattr(nation, attr)\n return f\"{value:,} ({value / (consts.MAX_MIL_PER_CITY[attr] * nation.num_cities):,.2%})\"\n elif attr == \"activity\":\n return strings.datetime_mention(\n nation.last_active, enums.TimestampStyle.SHORT_DATETIME\n )\n elif attr == \"infrastructure\":\n return f\"{nation.average_infrastructure:,.2f}\"\n else:\n return str(getattr(nation, attr))\n","repo_name":"mrvillage/rift","sub_path":"bot/src/models/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"28"}
+{"seq_id":"72688033996","text":"import pybullet as p\nimport numpy as np\nimport gym\nfrom gym import spaces\nfrom pybullet_data import getDataPath\nfrom time import sleep\ntry:\n import importlib.resources as pkg_resources\nexcept ImportError:\n # backported to python<3.7\n import importlib_resources as pkg_resources\n\n\nclass HexapodBulletEnv(gym.Env):\n \"\"\"Hexapod environnement using PyBullet.\"\"\"\n metadata = {\n \"render.modes\": [\"human\", \"rgb_array\"],\n \"video.frames_per_second\": 100,\n }\n\n def __init__(self, time_step=0.05, frameskip=12, render=False):\n \"\"\"\n Init environment.\n\n Args:\n time_step (float, optional): Environment time step in seconds. Defaults to 0.05.\n frameskip (int, optional): Sub steps for physic simulation. Defaults to 12.\n render (bool, optional): Open PyBullet GUI. Defaults to False.\n \"\"\"\n super().__init__()\n\n # Init PyBullet in GUI or DIRECT mode\n self._render = render\n if self._render:\n # Try to connect to PyBullet render server\n cid = p.connect(p.SHARED_MEMORY)\n if (cid < 0):\n # Fail to connect, so launch a new server\n cid = p.connect(p.GUI)\n else:\n p.connect(p.DIRECT)\n\n # 18 actions (servomotors)\n self.n_actions = 18\n self.action_space = spaces.Box(low=-1, high=1,\n shape=(self.n_actions,),\n dtype=\"float32\")\n\n # 18*(position,speed,torque) + robot positions observations + position target\n self.n_observation = 3*18+6+3\n self.observation_space = spaces.Box(low=-1, high=1,\n shape=(self.n_observation,),\n dtype=\"float32\")\n self.observation = np.zeros(self.n_observation, dtype=\"float32\")\n\n # Environment timestep and constants\n self.dt = time_step\n self.frameskip = frameskip\n self.servo_max_speed = 6.308 # rad/s\n self.servo_max_torque = 1.57 # N.m\n\n # Seed random number generator\n self.seed()\n\n # Init world\n p.setTimeStep(self.dt / self.frameskip) # between 0.001 and 0.01 s\n p.resetSimulation()\n p.setGravity(0, 0, -9.81) # Newton's apple\n p.setAdditionalSearchPath(getDataPath()) # Add pybullet_data\n p.loadURDF(\"plane.urdf\") # Load a ground\n\n # Load robot\n flags = p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE\n # flags |= p.URDF_MERGE_FIXED_LINKS # pybullet>2.89\n # flags |= p.URDF_IGNORE_VISUAL_SHAPES # pybullet>2.89, see collisions\n with pkg_resources.path(\"gym_kraby\", \"data\") as path:\n self.robot_id = p.loadURDF(str(path / 'hexapod.urdf'), flags=flags)\n\n # Get all motorized joints id and name (which are revolute joints)\n self.joint_list = [j for j in range(p.getNumJoints(self.robot_id))\n if p.getJointInfo(self.robot_id, j)[2] == p.JOINT_REVOLUTE]\n\n def reset(self):\n # Reset body position/orientation\n p.resetBasePositionAndOrientation(\n self.robot_id,\n [0, 0, 0.2],\n [0, 0, 0, 1],\n )\n\n # Reset all joint using normal distribution\n for j in self.joint_list:\n p.resetJointState(self.robot_id, j,\n np.random.uniform(low=-np.pi/4, high=np.pi/4))\n\n # Set random target and put it in observations\n self.target_position = np.array([1., 0., 0.1]) # FIXME: make it random\n self.observation[-3:] = self.target_position\n\n # Show target as a crosshair\n p.removeAllUserDebugItems()\n p.addUserDebugLine(self.target_position - [0, 0, 0.01],\n self.target_position + [0, 0, 0.01],\n [0, 0, 0], 2)\n p.addUserDebugLine(self.target_position - [0, 0.01, 0],\n self.target_position + [0, 0.01, 0],\n [0, 0, 0], 2)\n\n # Last target distance\n position, _ = p.getBasePositionAndOrientation(self.robot_id)\n self.last_target_distance = np.square(position - 
self.target_position).sum()\n\n # Return observation\n self._update_observation()\n return self.observation\n\n def step(self, action):\n # Update servomotors\n transformed_action = np.array(action) * self.servo_max_speed\n max_torques = [self.servo_max_torque] * self.n_actions\n p.setJointMotorControlArray(bodyIndex=self.robot_id,\n jointIndices=self.joint_list,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocities=transformed_action,\n forces=max_torques)\n\n # Wait for environment step\n for _ in range(self.frameskip): # step self.dt\n p.stepSimulation()\n if self._render:\n sleep(self.dt / self.frameskip) # realtime\n\n # Return observation, reward and done\n self._update_observation()\n reward = self._get_reward()\n position, _ = p.getBasePositionAndOrientation(self.robot_id)\n done = bool(position[2] < 0.08) # Has fallen?\n return self.observation, reward, done, {}\n\n def render(self, mode='human'):\n \"\"\"\n Render environment.\n\n PyBullet GUI can be disabled in favour of manual RGB rendering.\n\n Args:\n mode (str, optional): Render mode. Defaults to 'human'.\n\n Returns:\n np.ndarray: data array\n \"\"\"\n\n # If not asking for a RGB array, return nothing\n if mode != \"rgb_array\":\n return np.array([])\n\n position = p.getBasePositionAndOrientation(self.robot_id)[0]\n view_matrix = p.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=position,\n distance=0.6,\n yaw=30,\n pitch=-30,\n roll=0,\n upAxisIndex=2,\n )\n proj_matrix = p.computeProjectionMatrixFOV(\n fov=60,\n aspect=960./720,\n nearVal=0.1,\n farVal=100.0,\n )\n _, _, px, _, _ = p.getCameraImage(\n width=960,\n height=720,\n viewMatrix=view_matrix,\n projectionMatrix=proj_matrix,\n renderer=p.ER_TINY_RENDERER,\n )\n rgb_array = np.array(px)\n rgb_array = rgb_array[:, :, :3]\n return rgb_array\n\n def close(self):\n \"\"\"Do nothing as PyBullet automatically closes.\"\"\"\n pass\n\n @staticmethod\n def seed(seed=None):\n \"\"\"Sets the seed for this env's random number generator.\"\"\"\n np.random.seed(seed)\n\n def _get_reward(self):\n \"\"\"Compute reward function.\"\"\"\n # TODO: take into account the inclinaison of base\n # Distance progress toward goal\n position, _ = p.getBasePositionAndOrientation(self.robot_id)\n target_distance = np.square(position - self.target_position).sum()\n diff_distance = self.last_target_distance - target_distance\n self.last_target_distance = target_distance\n\n # Comsuption is speed * torque\n #speeds = self.observation[1:-6:3]\n #torques = self.observation[2:-6:3]\n #comsuption = self.dt * abs(sum(speeds * torques))\n comsuption = 0\n w = 0 # comsuption weight, FIXME: disabled\n\n # Compute reward\n reward = diff_distance - w * comsuption\n return reward\n\n def _update_observation(self):\n \"\"\"\n Update the observation from BulletPhysics.\n\n Observation contains:\n * 18x servomotors {position, speed, torque}\n * robot position and orientation\n * target (x, y, z)\n \"\"\"\n # Each servomotor position, speed and torque\n all_states = p.getJointStates(self.robot_id, self.joint_list)\n for i, (pos, vel, _, tor) in enumerate(all_states):\n self.observation[3*i:3*i+3] = [\n pos * 2 / np.pi,\n np.clip(vel / self.servo_max_speed, -1., 1.),\n np.clip(tor / self.servo_max_torque, -1., 1.),\n ]\n\n # Robot position and orientation\n pos, ori = p.getBasePositionAndOrientation(self.robot_id)\n self.observation[-9:-3] = list(pos) + list(p.getEulerFromQuaternion(ori))\n self.observation[-6:-3] /= np.pi # 
normalization\n","repo_name":"erdnaxe/kraby","sub_path":"gym_kraby/envs/hexapod_bullet_env.py","file_name":"hexapod_bullet_env.py","file_ext":"py","file_size_in_byte":8509,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"28"}
+{"seq_id":"8708151936","text":"import cx_Freeze\n\nexecutables = [cx_Freeze.Executable(script=\"src/main.py\",\n icon=\"src/resources/icon.ico\",\n targetName=\"Apollo.exe\")]\n\ninclude_files = [\"src\"]\n\npackages = [\"pygame\",\n \"json\"]\n\nexcludes = [\"tkinter\",\n \"numpy\",\n \"OpenGL\"]\n\ncx_Freeze.setup(\n name=\"Apollo\",\n options={\n \"build_exe\": {\n \"packages\": packages,\n \"excludes\": excludes,\n \"include_files\": include_files\n }\n },\n executables=executables\n)\n","repo_name":"Sigton/apollo","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"21950492005","text":"#!/usr/bin/env python3\n# coding=utf-8\n# news.py\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil.parser import parse\nimport cpca\n\ndef get_news():\n nCov_url = r'https://3g.dxy.cn/newh5/view/pneumonia?from=timeline&isappinstalled=0'\n r = requests.get(nCov_url)\n r.encoding = 'utf8'\n b = BeautifulSoup(r.text, features=\"html.parser\")\n l = b.find_all('div', attrs={'class': 'block___wqUAz'})\n for item in l:\n left, right = list(item.children)[0:2]\n title = (list(right.p.strings)[-1].strip().replace(' ', ''))\n publish_time = parse(item.div.get_text().split('前')[-1].replace('月', '-').replace('日', ' '))\n content = right.find_all('p', attrs={'class': 'topicContent___1KVfy'})[0].string.replace(' ', '')\n print(publish_time, title)\n print(cpca.transform([title])[['市', '省']])\n print()\n #print(content)\n\n\nif __name__ == '__main__':\n get_news()\n","repo_name":"frankysu2017/nCoV","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"34796885762","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Recipe\nfrom .forms import RecipeSearchForm\nimport pandas as pd\nfrom .utils import get_chart\n\n# Create your views here.\n\n\ndef home(request):\n return render(request, 'recipes/recipes_home.html')\n\n\nclass RecipeListView(LoginRequiredMixin, ListView):\n model = Recipe\n template_name = 'recipes/main.html'\n\n\nclass RecipeDetailView(LoginRequiredMixin, DetailView):\n model = Recipe\n template_name = 'recipes/detail.html'\n\n\n# KEEP PROTECTED\n\n@login_required\ndef records(request):\n # create an instance of SalesSearchForm that you defined in sales/forms.py\n form = RecipeSearchForm(request.POST or None)\n diff_df = None # intialize dataframe as None\n chart = None\n\n # check if the button is clicked\n if request.method == 'POST':\n # read book_title and chart_type\n recipe_diff = request.POST.get('recipe_diff')\n chart_type = request.POST.get('chart_type')\n # display in terminal - needed for debugging during development only\n # print(recipe_diff, chart_type)\n\n if recipe_diff == '#1':\n recipe_diff = 'Easy'\n if recipe_diff == '#2':\n recipe_diff = 'Medium'\n if recipe_diff == '#3':\n recipe_diff = 'Intermediate'\n if recipe_diff == '#4':\n recipe_diff = 'Hard'\n\n qs = Recipe.objects.all()\n id_list = []\n for obj in qs:\n diff = obj.calc_difficulty()\n if diff == recipe_diff:\n id_list.append(obj.id)\n\n qs = qs.filter(id__in=id_list)\n print(qs)\n if qs:\n diff_df = pd.DataFrame(qs.values())\n\n links = []\n for i, name in enumerate(diff_df['name']):\n name = '' + str(name) + ''\n links.append(name)\n\n chart = get_chart(chart_type, diff_df,\n labels=diff_df['name'].values)\n\n diff_df['name'] = links\n diff_df = diff_df.to_html(index=False, escape=False)\n\n # pack up data to be sent to template in the context dictionary\n context = {\n 'form': form,\n 'diff_df': diff_df,\n 'chart': chart,\n }\n\n # load the sales/record.html page using the data that you just prepared\n return render(request, 'recipes/records.html', context)\n","repo_name":"Carbon-42/recipe-app-deployment","sub_path":"recipes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"32845812452","text":"from util.print_util import iprint\nfrom util.misc_util import ensure_containing_dir\nimport os\nimport subprocess\nfrom config.global_config import global_config\n\nglove_url = \"http://nlp.stanford.edu/data/glove.6B.zip\"\nfile_names = [\"glove.6B.100d.txt\", \"glove.6B.200d.txt\", \"glove.6B.300d.txt\", \"glove.6B.50d.txt\"]\ndest_dir = f'{global_config[\"pretrained_directory\"]}/glove'\n\n'''\nDownload and process GLOVE word vectors. Only needed for LSTM model variants. \n'''\n\ndef main():\n\tiprint(f'Downloading GloVe vectors from {glove_url} to {dest_dir}')\n\tensure_containing_dir(dest_dir)\n\n\tzip_filename = glove_url.split('/')[-1]\n\tzip_filepath = os.path.join(dest_dir, zip_filename)\n\n\tif os.path.exists(zip_filepath):\n\t\tiprint(f'Zip file already exists at {zip_filepath}')\n\telse:\n\t\tiprint(f'Downloading zip file from {glove_url}...')\n\t\tsubprocess.run(['wget', glove_url, '-P', dest_dir])\n\n\tif all([os.path.exists(os.path.join(dest_dir, file_name)) for file_name in file_names]):\n\t\tiprint('Zip file already extracted')\n\telse:\n\t\tiprint('Unzipping zip file')\n\t\tsubprocess.run(['unzip', zip_filepath, '-d', dest_dir])\n\n\tiprint('Done!')\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"ChicagoHAI/learning-from-rationales","sub_path":"processing_scripts/download_glove.py","file_name":"download_glove.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"1281914832","text":"import unittest\nimport random\nimport datetime\nimport re\nfrom twitter.tweet import Tweet\nfrom twitter import template\n\n\ndef tweet(screen_name, text, followers=None, created=None):\n if followers == None:\n followers = random.randint(1, 200)\n if created == None:\n created = str(datetime.datetime.now())\n return Tweet({\n 'created_at': created,\n 'text': text,\n 'user': {\n 'screen_name': screen_name,\n 'followers_count': followers\n }\n })\n\n\nclass TestTweet(unittest.TestCase):\n def test_sorting(self):\n # More popular tweeters come first\n less = tweet('abc', 'something', 20)\n more = tweet('def', 'something', 30)\n [first, second] = sorted([less, more])\n self.assertEqual(first, more)\n self.assertEqual(second, less)\n\n def test_stringify(self):\n t = str(tweet('abc', 'something'))\n self.assertRegexpMatches(t, '@abc')\n self.assertRegexpMatches(t, 'something')\n\n\nclass TestTemplate(unittest.TestCase):\n def setUp(self):\n self.tweets = [tweet('user%d' % i, 'message %d' % i)\n for i in range(100)]\n self.rendered = template.render('xyz', self.tweets)\n\n def test_separators(self):\n self.assertEqual(len(re.findall(' 15:\n print('\\tThis seems to be a great ivenstment that should lead to fantastic returns!')\n\n\n# RUN PROGRAM\n# Prompt for user option\n\nproperty = CocRoiCalc()\n\n\ndef run():\n print('\\nThank you for using our Cash on Cash Return on Investment calculator!\\nWhere would you like to start?')\n\n while True:\n option = input('\\nPlease select from the following options:\\n\\n\\tI: Calculate the Income on my property\\n\\tE: Calculate the Expenses on my property\\n\\n\\tC: Calculate my Cash Flow (Income and Expenses calculations required)\\n\\tR: Calculate my Return on Investment (Cash Flow calculations required)\\n\\n\\tQ: Quit terminal\\n\\nInput: ')\n\n if option.lower() == 'q':\n print('Thank you for visiting our website! We hope to serve you again soon!')\n break\n \n elif option.lower() == 'i':\n property.getIncome()\n \n elif option.lower() == 'e':\n property.getExpense()\n \n elif option.lower() == 'c':\n property.cashFlow()\n \n elif option.lower() == 'r':\n property.investReturn()\n \n else:\n print('\\nPlease select a valid option from the menu.')\n\nrun()\n","repo_name":"corytuggle/rentalCalculatorOOP","sub_path":"rentalCalculator.py","file_name":"rentalCalculator.py","file_ext":"py","file_size_in_byte":10235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"29878949583","text":"import base64\nimport datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom src.model.models import Link, Documento, DocumentoLink\nfrom src.service.DocumentoService import DocumentoService\nfrom src.service.LinkService import LinkService\nfrom src.service.HostService import HostService\nfrom src.service.UtilsService import UtilsService\nfrom src.service.RobotsService import RobotsService\nfrom src.service.StopwordService import StopwordsService\nfrom src.service.DocumentoLinkService import DocumentoLinkService\n\n\nds = DocumentoService()\nls = LinkService()\ndls = DocumentoLinkService()\nhs = HostService()\nus = UtilsService()\nrobotsService = RobotsService()\nstopwordsService = StopwordsService()\n\n\nclass ColetorService:\n urlStringAnterior = None\n sementes = []\n def executar(self):\n documentos = []\n try:\n self.sementes = ls.obterLinksNaoColetados()\n while len(self.sementes) > 0:\n try:\n us.verificaColetaConsecultiva(self.urlStringAnterior, self.sementes[0].url)\n if robotsService.verificaPermissaoRobots(self.sementes[0].url):\n documentos.append(self.coletar(self.sementes[0].url))\n except Exception:\n print('falha a coletar: '+self.sementes[0].url)\n ls.remove(self.sementes[0])\n finally:\n del self.sementes[0]\n print(str(len(self.sementes))+\" Sementes restantes.\")\n except Exception:\n print(\"Erro ao executar o serviço de coleta!\")\n return documentos\n\n def coletar(self, url):\n documento = None\n print('Iniciando coleta url: ['+url+\"]\")\n try:\n documento = Documento()\n requisicao = requests.get(url, verify=True, timeout=5)\n print(\"Código HTTP de resposta: \" + str(requisicao.status_code))\n pagina = requisicao.text\n soup = BeautifulSoup(pagina)\n urls = us.obterLinks(soup)\n\n documento = self.loadOrNewDoc(url, soup, pagina)\n\n self.trataLinksColetados(url, documento, urls)\n\n ds.update(documento)\n except Exception:\n ls.atualizaDataUltimaColeta(url, datetime.datetime.now())\n print(\"Erro ao coletar a página!\")\n finally:\n self.urlStringAnterior = self.sementes[0]\n self.sementes = ls.obterLinksNaoColetados()\n self.sementes = us.removeLinksRepetidos(self.sementes)\n return documento\n\n\n def loadOrNewDoc(self, url, soup, pagina):\n docold = ds.findByUrl(url)\n if(docold is not None):\n documento = docold\n data = base64.b64encode(pagina.encode())\n documento.texto = str(data)\n documento.visao = stopwordsService.tratarVisao(soup)\n self.loadOrNewLink(url, documento)\n ds.update(documento)\n else:\n documentoLink = DocumentoLink()\n documento = Documento()\n documento.url = url\n data = base64.b64encode(pagina.encode())\n documento.texto = str(data)\n documento.visao = stopwordsService.tratarVisao(soup)\n documento = ds.save(documento)\n link = self.loadOrNewLink(url, documento)\n documentoLink.documento = documento\n documentoLink.documento_id = documento.id\n documentoLink.link = link\n documentoLink.link_id = link.id\n dls.save(link)\n\n return documento\n\n\n\n def loadOrNewLink(self, url, documento):\n link = ls.findByUrl(url)\n if link is None:\n link = Link()\n link.url = url\n host = hs.findByUrl(url)\n link.host = host\n link.host_id = host.id\n link.ultimaColeta = datetime.datetime.now()\n link = ls.save(link)\n else:\n link.ultimaColeta = datetime.datetime.now()\n link = ls.update(link)\n return link\n\n def trataLinksColetados(self, url, documento, urls):\n urls = us.removeElementosRepetidos(urls)\n for url in urls:\n if len(url) > 253:\n continue\n if url is not None and url is not '':\n link = 
ls.inserirSemente(url)\n dls.inserirDocumentoLink(documento, link)\n docLinkList = dls.findByDocId(documento.id)\n print('Finalizando coleta de ['+url+']')\n print('Número de links coletados: ['+str(len(urls))+']')\n print('Tamanho da lista de links: ['+str(len(docLinkList))+']')\n\n\n","repo_name":"godah/Maquina-de-busca-Python","sub_path":"src/service/ColetorService.py","file_name":"ColetorService.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"9149357666","text":"from urllib.request import urlopen\nimport json\nimport mlab\nfrom questions import Questions\n\nmlab.connect()\n\nurl = \"https://opentdb.com/api.php?amount=50\"\nconn = urlopen(url)\n\n### Fetch the content into a string\nraw_data = conn.read()\npage_content = raw_data.decode(\"utf8\")\n# print(type(page_content))\n\n### Convert it into a dict\nd1 = json.loads(page_content)\nresults = d1[\"results\"]\nfor result in results:\n for i in range (len(results)): \n result = Questions(category = results[i][\"category\"], types = results[i][\"type\"], \n difficulty = results[i][\"difficulty\"], question = results[i][\"question\"], correct_answer = results[i][\"correct_answer\"], \n incorrect_answers = results[i][\"incorrect_answers\"])\n result.save()\n\n\n\n","repo_name":"levuhachi/levuhachi-web-c4e23","sub_path":"Web2/homework-bonus/serious_ex.py","file_name":"serious_ex.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"44109028147","text":"import cv2, sys\nimport tensorflow as tf\n\ndef findPhone(img_path, model, scale_percent=70):\n# This function is to locate a phone in the given image by a model\n img_ = cv2.imread(img_path)\n width = int(img_.shape[1] * scale_percent / 100)\n height = int(img_.shape[0] * scale_percent / 100)\n dim = (width, height)\n # Resize the image \n img_ = cv2.resize(img_, dim, interpolation = cv2.INTER_AREA)/255\n # Ge the prediction from the model\n pred = model.predict(tf.expand_dims(img_, axis=0), verbose=False)\n return pred[0]\n\n# Get the image path\nimg_path = sys.argv[1]\n# Get the trained model\nmodel = tf.keras.models.load_model(\"phone_detection.h5\")\n# Get the phone's location\nx, y = findPhone(img_path, model)\n# Print out the location\nprint(f\"{x:.4f} {y:.4f}\")\n","repo_name":"kornsook/phone-detection","sub_path":"find_phone.py","file_name":"find_phone.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"3976654526","text":"# Ao testar sua solução, não se limite ao caso de exemplo.\n\nfrom math import *\n\n# Leitura dos lados do triangulo a, b, and c\na = float(input (\"Lado 1: \"))\nb = float(input (\"Lado 2: \"))\nc = float(input (\"Lado 3: \"))\n\nprint(\"Entradas:\", a, \",\", b, \",\", c)\n\n# Testa se pelo menos uma das entradas eh negativa \nif (a > 0 and b > 0 and c > 0):\n\t\n\t# Testa se medidas correspondem aas de um triangulo\n\tif (a + b > c and a + c > c and c + b > a): #And, todas precisam ser verdadeiras ao mesmo tempo.\n\t\ts = (a + b + c) / 2.0\n\t\tarea = sqrt(s * (s-a) * (s-b) * (s-c))\n\t\tprint(\"Area:\" ,round(area, 3)) #acredito que a esssa altura o round ta aí tranquilão\n\telse:\n\t\tprint(\"Area: invalida\")\nelse:\n\tprint(\"Area: invalida\")\n#lembrar que o if casa com seu respectivo else , no caso o primeiro if \"pareia\" com o ultimo else\n#pois se um numero de entrada for negativo automaticamente cai no else sem passar pelo segundo if\n#e só.","repo_name":"JosephLevinthal/Research-projects","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4464/codes/1692_1079.py","file_name":"1692_1079.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"36157538866","text":"\"\"\"Попытка конкурентного выполнения счетного кода (bad practice)\"\"\"\n\n\nimport asyncio\n\nfrom src.util import async_timed\n\n\n@async_timed()\nasync def cpu_bound_work() -> int:\n counter = 0\n for _ in range(100000000):\n counter = counter + 1\n return counter\n\n\n@async_timed()\nasync def async_main():\n task_one = asyncio.create_task(cpu_bound_work())\n task_two = asyncio.create_task(cpu_bound_work())\n await task_one\n await task_two\n\n\ndef main():\n asyncio.run(async_main())\n","repo_name":"mspiridonov2706/async_learning","sub_path":"src/chapter_2/listing_2_18.py","file_name":"listing_2_18.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"36914216956","text":"import os\nimport sys\nimport copy\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport joblib\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom torchmetrics.functional.regression.r2 import r2_score\nfrom torchmetrics.functional.regression.mse import mean_squared_error\nfrom torchmetrics.functional.regression.mae import mean_absolute_error\nfrom torchmetrics.functional.regression.mape import mean_absolute_percentage_error\n\nSEED = 42\nSAVE = False\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\npd.set_option(\"display.max_rows\", None)\n\nRESPONSE_VARIABLES = [\"TS\", \"WVP\", \"%E\"]\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nmodel_folder = os.path.join(current_dir, \"../trained_models/wvp\")\n\n\nclass ModelDataset(Dataset):\n \"\"\"\n Prepare the dataset for regression\n \"\"\"\n\n def __init__(self, X, y):\n self.X = torch.tensor(X, dtype=torch.float32)\n self.y = torch.tensor(y, dtype=torch.float32)\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, i):\n return self.X[i], self.y[i]\n\n\nclass MLP(nn.Module):\n \"\"\"\n Multilayer Perceptron for regression.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.layers = nn.Sequential(\n nn.Linear(9, 24),\n nn.Tanh(),\n nn.Linear(24, 22),\n nn.LeakyReLU(),\n nn.Linear(22, 9),\n nn.Tanh(),\n nn.Linear(9, 1),\n )\n\n def forward(self, x):\n \"\"\"\n Forward pass\n \"\"\"\n return self.layers(x)\n\n\ndef compute_mre(y_pred, y_true):\n \"\"\"\n Compute the mean relative error\n \"\"\"\n return (np.abs(y_true - y_pred)) / y_true\n\n\ndef main(BATCH_SIZE, NUM_EPOCHS, TRAIN_SIZE, WEIGHT_DECAY, LEARNING_RATE):\n TRAIN_DATA_PATH = os.environ.get(\"TRAIN_DATA_PATH\")\n TEST_DATA_PATH = os.environ.get(\"TEST_DATA_PATH\")\n SCALER_PATH = os.environ.get(\"SCALER_PATH\")\n\n\nif __name__ == \"__main__\":\n BATCH_SIZE = 64\n NUM_EPOCHS = 1000\n TRAIN_SIZE = 0.7\n WEIGHT_DECAY = 0.01\n LEARNING_RATE = 0.001\n\n main(BATCH_SIZE, NUM_EPOCHS, TRAIN_SIZE, WEIGHT_DECAY, LEARNING_RATE)\n","repo_name":"JorgeO3/tesisV4","sub_path":"etc/wvp_model.py","file_name":"wvp_model.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"17154773623","text":"# Default Values for optional arguments.\r\ndef my_function(a=1, b=2, c=3):\r\n new_num = a + b + c\r\n return new_num\r\n\r\n# no arguments needed.\r\ndefault_value = my_function()\r\nprint(default_value)\r\n\r\n# keyword arguments can be used to change the values.\r\noptional_value = my_function(b=3)\r\nprint(optional_value)","repo_name":"deemedpydeveloper/Py_Intermediate_Level","sub_path":"13.Tkinter,GUI & Python_Arguments/1.arguments_with_default_values.py","file_name":"1.arguments_with_default_values.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"70621740555","text":"from chainerex.training.triggers.early_stopping_trigger import EarlyStoppingTrigger\n\n\nclass IterativeEarlyStoppingManager(object):\n \"\"\"\n \n Examples:\n from chainerex.training.triggers import IterativeEarlyStoppingManager\n\n iesm = IterativeEarlyStoppingManager()\n trainer = Trainer(updater, stop_trigger=iesm.stop_trigger)\n \n schedule_lr_list = [0.1, 0.001]\n def extension_fn(trainer):\n index = iesm.iterate_count\n optimizer.lr = schedule_lr_list[index]\n trainer.extend(extension_fn, trigger=iesm.extension_trigger)\n\n \"\"\"\n\n def increment_iterate_count(self, trainer):\n self.iterate_count += 1\n if self.verbose:\n print('updating count to {}'.format(self.iterate_count))\n\n def __init__(self, max_iterate_count=-1,\n trigger=(1, 'epoch'), monitor='main/loss', patients=3,\n mode='auto', verbose=False, max_epoch=100, debug=False):\n self.extension_trigger = EarlyStoppingTrigger(\n trigger=trigger, monitor=monitor, patients=patients,\n mode=mode, verbose=verbose, max_epoch=max_epoch, debug=debug)\n self.extension_trigger.set_on_condition_listener(\n self.increment_iterate_count\n )\n self.verbose = verbose\n self.max_epoch = max_epoch\n self.stop_trigger = self.stop_condition\n self.max_iterate_count = max_iterate_count\n self.iterate_count = 0\n\n def stop_condition(self, trainer):\n # 1. Check epoch\n if self.max_epoch >=0 and trainer.updater.epoch_detail >= self.max_epoch:\n return True\n\n # 2. Check iterative count\n if self.max_iterate_count >=0 and self.max_iterate_count > self.iterate_count:\n return True\n return False\n\n @property\n def iterate_index(self):\n return self.iterate_count - 1\n","repo_name":"corochann/chainerex","sub_path":"chainerex/training/triggers/iterative_early_stopping_manager.py","file_name":"iterative_early_stopping_manager.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"22509301926","text":"__author__ = 'Hillel'\r\n\"\"\"\r\nThis module contains the constants needed for any configuration of the session\r\n\"\"\"\r\n# =====================imports=====================#\r\n\r\nimport os\r\n\r\n# =====================constants===================#\r\n\r\n# region Condition Constants\r\nCONDITIONS = {\r\n \"Exists\": os.path.exists,\r\n \"Bool\": lambda x: (x == \"True\" or x == \"False\") and eval(x) # for sanity and security reasons.\r\n}\r\n\r\nNEXT_STEPS_SEPARATOR = \";\"\r\nSTEP_CONDITION_SEPARATOR = \",\"\r\nIN_CONDITION_SEPARATOR = \" \"\r\n# endregion\r\n\r\n\r\n# region Sections and Options\r\nMAX_STEP_PROCESSES_OPTION = 'max_processes_num'\r\nOVERALL_MAX_PS_OPTION = 'overall_max_processes_num'\r\nNEXT_STEP_OPTION = \"next_step\"\r\nERROR_STEP_OPTION = \"error_step\"\r\nSTEP_TYPE_OPTION = \"Type\"\r\nSTEP_NAME_OPTION = \"name\"\r\nPROGRAM_NAME_OPTION = \"program\"\r\nPROGRAM_PARAMS_OPTION = \"parameters\"\r\nCONSTRAIN_COND_OPTION = \"constraint\"\r\n\r\nENABLE_SECTION_OPTION = \"enable\"\r\nRECOVER_SECTION = \"Recover\"\r\n\r\n# The default name for the first step and error step\r\nFIRST_STEP = \"Step_1\"\r\nERROR_STEP = \"Step_-1\"\r\nSTEP_SEMI_CONDITION = None\r\n# endregion\r\n","repo_name":"GiliWolf/Hyper_Editing","sub_path":"Tools/EditingIndex/EIPipeline/ConfigConsts.py","file_name":"ConfigConsts.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"7953359458","text":"from django.urls import path\r\nfrom . import views\r\n\r\napp_name='codagram'\r\nurlpatterns=[\r\n path('', views.index, name='index'),\r\n path('postlist_new/', views.postlist_new, name='postlist_new'),\r\n path('reset_alarm/', views.reset_alarm, name='reset_alarm'),\r\n \r\n path('denine/', views.denine_404, name='denine_404'),\r\n\r\n]","repo_name":"aivle-SDG21/MiniProject1-DjangoWebApp_n","sub_path":"codagram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"24008051480","text":"import os\nimport numpy as np\nfrom sklearn import preprocessing\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef check_exist_face(uuid):\n face_root = '../../MSP-IMPROV_feature/face/raw'\n return os.path.exists(os.path.join(face_root, uuid+'.npy'))\n\ndef make_label():\n save_root = '../../MSP-IMPROV_feature/target'\n if not os.path.exists(save_root):\n os.makedirs(save_root)\n label_file = '../Evalution.txt'\n lines = open(label_file).readlines()\n records = list(filter(lambda x: '.avi' in x, lines))\n records = list(filter(lambda x: x.split('-')[4]=='S', records))\n int2name = []\n label = []\n label_set = ['A', 'H', 'N', 'S']\n no_face_count = 0\n no_face_record = []\n for record in records:\n uuid, label_name = record.split(';')[:2]\n uuid = uuid.strip().replace('UTD', 'MSP').split('.')[0]\n label_name = label_name.strip()\n if label_name in label_set and check_exist_face(uuid):\n int2name.append(uuid)\n label.append(label_set.index(label_name))\n else:\n print(uuid, label_name, check_exist_face(uuid))\n if not check_exist_face(uuid) and label_name in label_set:\n no_face_count += 1\n no_face_record.append(f'{uuid} {label_name} {check_exist_face(uuid)}\\n')\n \n print(\"No face:\", no_face_count)\n f = open('no_face_check.txt', 'w')\n f.writelines(no_face_record)\n \n int2name = np.array(int2name)\n label = np.array(label)\n int2name_path = os.path.join(save_root, 'all_int2name.npy')\n label_path = os.path.join(save_root, 'all_label.npy')\n print('int2name:', int2name.shape)\n print('label:', label.shape)\n np.save(int2name_path, int2name)\n np.save(label_path, label)\n\n for spk in ['M01', 'F01', 'M02', 'F02', 'M03', 'F03',\n 'M04', 'F04', 'M05', 'F05', 'M06', 'F06']:\n spk_int2name = []\n spk_label = []\n for uuid, _label in zip(int2name, label):\n spkid = uuid.split('-')[3]\n if spkid == spk:\n spk_int2name.append(uuid)\n spk_label.append(_label)\n spk_int2name = np.array(spk_int2name)\n spk_label = np.array(spk_label)\n print(spk, 'int2name', spk_int2name.shape)\n print(spk, 'label', spk_label.shape)\n spk_int2name_path = os.path.join(save_root, f'{spk}_int2name.npy')\n spk_label_path = os.path.join(save_root, f'{spk}_label.npy')\n np.save(spk_int2name_path, spk_int2name)\n np.save(spk_label_path, spk_label)\n\ndef statistic():\n int2name = np.load('../../MSP-IMPROV_feature/target/all_int2name.npy')\n label = np.load('../../MSP-IMPROV_feature/target/all_label.npy')\n # face_frames\n face_feature_root = '../../MSP-IMPROV_feature/face/raw'\n face_lengths = []\n for uuid in int2name:\n face_feature_path = os.path.join(face_feature_root, uuid + '.npy')\n face_feature = np.load(face_feature_path)\n if len(face_feature) != 0:\n assert face_feature.shape[1] == 342\n face_lengths.append(face_feature.shape[0])\n else:\n print(uuid, 'has zero length')\n \n face_lengths.sort()\n _min = min(face_lengths)\n _max = max(face_lengths)\n mean = sum(face_lengths) / len(face_lengths)\n mid = face_lengths[int(len(face_lengths)/2)]\n sp_75 = face_lengths[int(len(face_lengths)*0.75)]\n print('Face:')\n print(f'Min:{_min} Max:{_max} Mean:{mean} Mid:{mid} 75%:{sp_75}')\n\n # word\n text_feature_root = '../../MSP-IMPROV_feature/text/raw'\n text_lengths = []\n for uuid in int2name:\n text_feature_path = os.path.join(text_feature_root, uuid + '.npy')\n text_feature = np.load(text_feature_path)\n if len(text_feature) != 0:\n assert text_feature.shape[1] == 1024\n text_lengths.append(text_feature.shape[0])\n \n text_lengths.sort()\n _min = 
min(text_lengths)\n _max = max(text_lengths)\n mean = sum(text_lengths) / len(text_lengths)\n mid = text_lengths[int(len(text_lengths)/2)]\n sp_75 = text_lengths[int(len(text_lengths)*0.75)]\n print('Text:')\n print(f'Min:{_min} Max:{_max} Mean:{mean} Mid:{mid} 75%:{sp_75}')\n\ndef statis_emo():\n all_label = np.load('../../MSP-IMPROV_feature/target/all_label.npy')\n record = {\n 0:0, 1:0, 2:0, 3:0\n }\n for label in all_label:\n record[label] += 1\n print(len(all_label))\n print(record)\n\ndef gather(): \n face_len = 40\n text_len = 23\n for spk in ['M01', 'F01', 'M02', 'F02', 'M03', 'F03',\n 'M04', 'F04', 'M05', 'F05', 'M06', 'F06']:\n\n int2name = np.load(f'../../MSP-IMPROV_feature/target/{spk}_int2name.npy')\n \n # audio\n save_path = f'../../MSP-IMPROV_feature/audio/IS10_{spk}.npy'\n feats = []\n for uuid in int2name:\n feat_file = '../../MSP-IMPROV_feature/audio/raw/' + uuid + '.npy'\n feat = np.load(feat_file)\n feats.append(feat)\n feats = np.array(feats)\n print('Audio total:', feats.shape)\n np.save(save_path, feats)\n\n # visual\n save_path = f'../../MSP-IMPROV_feature/face/denseface_{spk}.npy'\n feats = []\n for uuid in int2name:\n feat_file = '../../MSP-IMPROV_feature/face/raw/' + uuid + '.npy'\n feat = np.load(feat_file)\n if len(feat) >= face_len:\n feat = feat[:face_len]\n else:\n feat = np.concatenate([feat, np.zeros([face_len-len(feat), 342])])\n feats.append(feat)\n feats = np.array(feats)\n print('Visual total:', feats.shape)\n np.save(save_path, feats)\n\n # text\n save_path = f'../../MSP-IMPROV_feature/text/bert_{spk}.npy'\n feats = []\n for uuid in int2name:\n feat_file = '../../MSP-IMPROV_feature/text/raw/' + uuid + '.npy'\n feat = np.load(feat_file)\n if len(feat) >= text_len:\n feat = feat[:text_len]\n else:\n feat = np.concatenate([feat, np.zeros([text_len-len(feat), 1024])])\n feats.append(feat)\n feats = np.array(feats)\n print('Text total:', feats.shape)\n np.save(save_path, feats)\n\n\ndef make_cv_level_target():\n root = '/data6/lrc/MSP-IMPROV_feature/target/spk_level'\n save_root = '/data6/lrc/MSP-IMPROV_feature/target/cv_level'\n for cv in range(1, 13):\n val_gender = 'M' if cv % 2 == 1 else \"F\"\n val_num = (cv+1) // 2\n tst_gender = 'F' if cv % 2 == 1 else \"M\"\n tst_num = (cv+1) // 2\n val_spk = f'{val_gender}0{val_num}'\n tst_spk = f'{tst_gender}0{tst_num}'\n print('CV:', cv)\n print('val:', val_spk, 'tst:', tst_spk)\n trn_label, trn_int2name = [], []\n val_label, val_int2name = [], []\n tst_label, tst_int2name = [], []\n for spk in ['M01', 'F01', 'M02', 'F02', 'M03', 'F03',\n 'M04', 'F04', 'M05', 'F05', 'M06', 'F06']:\n label = np.load(os.path.join(root, f'{spk}_label.npy'))\n int2name = np.load(os.path.join(root, f'{spk}_int2name.npy'))\n if spk == val_spk:\n val_label.append(label)\n val_int2name.append(int2name)\n elif spk == tst_spk:\n tst_label.append(label)\n tst_int2name.append(int2name)\n else:\n trn_label.append(label)\n trn_int2name.append(int2name)\n trn_label = np.concatenate(trn_label, axis=0)\n trn_int2name = np.concatenate(trn_int2name, axis=0)\n val_label = np.concatenate(val_label, axis=0)\n val_int2name = np.concatenate(val_int2name, axis=0)\n tst_label = np.concatenate(tst_label, axis=0)\n tst_int2name = np.concatenate(tst_int2name, axis=0)\n save_dir = os.path.join(save_root, str(cv))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n np.save(os.path.join(save_dir, 'trn_label.npy'), trn_label)\n np.save(os.path.join(save_dir, 'trn_int2name.npy'), trn_int2name)\n np.save(os.path.join(save_dir, 'val_label.npy'), 
val_label)\n np.save(os.path.join(save_dir, 'val_int2name.npy'), val_int2name)\n np.save(os.path.join(save_dir, 'tst_label.npy'), tst_label)\n np.save(os.path.join(save_dir, 'tst_int2name.npy'), tst_int2name)\n print('Trn:', trn_label.shape)\n print('Val:', val_label.shape)\n print('Tst:', tst_label.shape)\n assert(len(trn_label) == len(trn_int2name))\n assert(len(val_label) == len(val_int2name))\n assert(len(tst_label) == len(tst_int2name))\n\ndef make_cv_level_feature(modality):\n feat_name = {\n 'audio': 'IS10',\n 'face': 'denseface',\n 'text': 'bert'\n }\n root = f'/data6/lrc/MSP-IMPROV_feature/{modality}/spk_level'\n save_root = f'/data6/lrc/MSP-IMPROV_feature/{modality}/cv_level'\n for cv in range(1, 13):\n val_gender = 'M' if cv % 2 == 1 else \"F\"\n val_num = (cv+1) // 2\n tst_gender = 'F' if cv % 2 == 1 else \"M\"\n tst_num = (cv+1) // 2\n val_spk = f'{val_gender}0{val_num}'\n tst_spk = f'{tst_gender}0{tst_num}'\n print('CV:', cv)\n print('val:', val_spk, 'tst:', tst_spk)\n trn_feat, val_feat, tst_feat = [], [], []\n for spk in ['M01', 'F01', 'M02', 'F02', 'M03', 'F03',\n 'M04', 'F04', 'M05', 'F05', 'M06', 'F06']:\n feat = np.load(os.path.join(root, f'{feat_name[modality]}_{spk}.npy'))\n if spk == val_spk:\n val_feat.append(feat)\n elif spk == tst_spk:\n tst_feat.append(feat)\n else:\n trn_feat.append(feat)\n trn_feat = np.concatenate(trn_feat, axis=0)\n val_feat = np.concatenate(val_feat, axis=0)\n tst_feat = np.concatenate(tst_feat, axis=0)\n if modality == 'audio':\n scaler = preprocessing.StandardScaler().fit(trn_feat)\n trn_feat = scaler.transform(trn_feat)\n val_feat = scaler.transform(val_feat)\n tst_feat = scaler.transform(tst_feat)\n\n save_dir = os.path.join(save_root, str(cv))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n np.save(os.path.join(save_dir, 'trn.npy'), trn_feat)\n np.save(os.path.join(save_dir, 'val.npy'), val_feat)\n np.save(os.path.join(save_dir, 'tst.npy'), tst_feat)\n print('Trn:', trn_feat.shape)\n print('Val:', val_feat.shape)\n print('Tst:', tst_feat.shape)\n\n\ndef make_miss_modality_mix_data(src, tgt, label_dir, new_label_dir, cv=1, phase='val'):\n cv = str(cv)\n A = np.load(os.path.join(src, 'audio/cv_level', cv, phase + '.npy'))\n V = np.load(os.path.join(src, 'face/cv_level', cv, phase + '.npy'))\n L = np.load(os.path.join(src, 'text/cv_level', cv, phase + '.npy'))\n LABEL = np.load(os.path.join(label_dir, cv, phase + '_label.npy'))\n INT2NAME = np.load(os.path.join(label_dir, cv, phase + '_int2name.npy'))\n new_A = []\n new_V = []\n new_L = []\n new_label = []\n new_int2name = []\n miss_type = []\n modalities = ['a', 'v', 'l']\n for a, v, l, label, int2name in zip(A, V, L, LABEL, INT2NAME):\n a = np.expand_dims(a, 0)\n v = np.expand_dims(v, 0)\n l = np.expand_dims(l, 0)\n # A + Z + Z\n new_A.append(a)\n new_V.append(np.zeros(v.shape))\n new_L.append(np.zeros(l.shape))\n new_label.append(label)\n new_int2name.append(int2name)\n miss_type.append('azz')\n # Z + V + Z\n new_A.append(np.zeros(a.shape))\n new_V.append(v)\n new_L.append(np.zeros(l.shape))\n new_label.append(label)\n new_int2name.append(int2name)\n miss_type.append('zvz')\n # Z + Z + L\n new_A.append(np.zeros(a.shape))\n new_V.append(np.zeros(v.shape))\n new_L.append(l)\n new_label.append(label)\n new_int2name.append(int2name)\n miss_type.append('zzl')\n # A + V + Z\n new_A.append(a)\n new_V.append(v)\n new_L.append(np.zeros(l.shape))\n new_label.append(label)\n new_int2name.append(int2name)\n miss_type.append('avz')\n # A + Z + L\n new_A.append(a)\n 
new_V.append(np.zeros(v.shape))\n new_L.append(l)\n new_label.append(label)\n new_int2name.append(int2name)\n miss_type.append('azl')\n # Z + V + L\n new_A.append(np.zeros(a.shape))\n new_V.append(v)\n new_L.append(l)\n new_label.append(label)\n new_int2name.append(int2name)\n miss_type.append('zvl')\n\n new_A = np.vstack(new_A)\n new_V = np.vstack(new_V)\n new_L = np.vstack(new_L)\n new_label = np.vstack(new_label)\n new_int2name = np.array(new_int2name)\n miss_type = np.array(miss_type)\n save_a = os.path.join(tgt, 'audio/miss', cv)\n save_v = os.path.join(tgt, 'face/miss', cv)\n save_l = os.path.join(tgt, 'text/miss', cv)\n save_target = os.path.join(new_label_dir, cv)\n mkdir(save_a)\n mkdir(save_v)\n mkdir(save_l)\n mkdir(save_target)\n save_a = os.path.join(save_a, phase + '.npy')\n save_l = os.path.join(save_l, phase + '.npy')\n save_v = os.path.join(save_v, phase + '.npy')\n save_label = os.path.join(save_target, phase + '_label.npy')\n save_int2name = os.path.join(save_target, phase + '_int2name.npy')\n save_miss_type = os.path.join(save_target, phase + '_type.npy')\n print('Save to ' + save_a)\n print('Save to ' + save_v)\n print('Save to ' + save_l)\n print('Save to ' + save_label)\n print('Save to ' + save_int2name)\n print('Save to ' + save_miss_type)\n np.save(save_a, new_A)\n np.save(save_v, new_V)\n np.save(save_l, new_L)\n np.save(save_label, new_label)\n np.save(save_int2name, new_int2name)\n np.save(save_miss_type, miss_type)\n\n\ndef check_data(root):\n cv = '3'\n phase = 'val'\n A = np.load(os.path.join(root, 'audio/miss', cv, phase + '.npy'))\n V = np.load(os.path.join(root, 'face/miss', cv, phase + '.npy'))\n L = np.load(os.path.join(root, 'text/miss', cv, phase + '.npy'))\n print(os.path.join(root, 'audio/miss', cv, phase + '.npy'))\n print(os.path.join(root, 'face/miss', cv, phase + '.npy'))\n print(os.path.join(root, 'text/miss', cv, phase + '.npy'))\n input()\n for a, v, l in zip(A, V, L):\n print(a.shape, np.sum(a))\n print(v.shape, np.sum(v))\n print(l.shape, np.sum(l))\n input()\n\n# make_label()\n# statistic()\n# gather()\n# make_cv_level_target()\n# make_cv_level_feature('audio')\n# make_cv_level_feature('face')\n# make_cv_level_feature('text')\n\n# src = '/data6/lrc/MSP-IMPROV_feature/'\n# tgt = '/data6/lrc/MSP-IMPROV_feature/'\n# label_dir = '/data6/lrc/MSP-IMPROV_feature/target/cv_level'\n# new_label_dir = '/data6/lrc/MSP-IMPROV_feature/target/miss'\n# for cv in range(1, 13):\n# for phase in ['val', 'tst']:\n# make_miss_modality_mix_data(src, tgt, label_dir, new_label_dir, cv, phase)\n# check_data('/data6/lrc/MSP-IMPROV_feature/')\n\nstatis_emo()","repo_name":"AIM3-RUC/MEmoBert","sub_path":"preprocess/msp/arrange_data.py","file_name":"arrange_data.py","file_ext":"py","file_size_in_byte":15043,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"}
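The two make_cv_level_* functions above derive the validation and test speakers from the fold index by parity. A standalone check of that mapping, with the arithmetic copied from make_cv_level_target:

for cv in range(1, 13):
    val_gender = 'M' if cv % 2 == 1 else 'F'
    tst_gender = 'F' if cv % 2 == 1 else 'M'
    num = (cv + 1) // 2
    print(cv, f'val={val_gender}0{num}', f'tst={tst_gender}0{num}')
# cv=1 -> val=M01/tst=F01, cv=2 -> val=F01/tst=M01, ..., cv=12 -> val=F06/tst=M06;
# the remaining ten speakers form the training split for each fold.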
+{"seq_id":"38936265661","text":"import sys\nfrom utilities import sequence_to_latex, steps_to_latex, get_cube_state, permutations\n\ndef split_reconstruction(recon):\n recon = recon.strip()\n lines = [line.strip() for line in recon.split('\\n')]\n # lines = ['title', 'scramble', '', 'step 1 // comment 1', 'step 2 // comment 2', '', 'solution']\n title, scramble = lines[0], lines[1]\n\n if len(lines)>2 and lines[2]=='':\n lines = lines[3:]\n if '' in lines:\n steps = lines[:lines.index('')]\n solution = lines[-1]\n return title, scramble, steps, solution\n return title, scramble, lines, None\n return title, scramble, None, None\n\ndef box_to_latex(title, scramble, steps=None, solution=None, draw_func=\"DrawCube\"):\n latex_str = \"\\\\bigskip\\n\\\\begin{tabular}{|p{0.968\\\\linewidth}|}\\n\\\\hline\\n\\\\textbf{\" + title + \"\"\"}\\\\\\\\\\n\\\\hline\nScramble: \"\"\" + sequence_to_latex(scramble) + \"\\\\\\\\\\n\\\\hline\"\n if steps is not None:\n latex_str += \"\\\\\\\\\\n\\\\begin{minipage}[l]{0.75\\\\linewidth}\\n\" + steps_to_latex(steps) + \"\\\\end{minipage}\"\n latex_str += \"\\n\\\\begin{minipage}[c]{0.20\\\\linewidth}\\n\\\\\"+ draw_func + \"{3}{2}\\n\\\\end{minipage}\\\\\\\\\"\n if solution is not None:\n latex_str += \"\"\"\\n\\\\hline\\nSolution: \"\"\" + sequence_to_latex(solution) + \"\\\\\\\\\"\n latex_str += \"\\n\\\\hline\\n\\\\end{tabular}\\n\\\\bigskip\\\\\\\\\"\n\n return latex_str\n\n# import reconstructions\nif int(sys.argv[3])==0:\n from reconstructions_3x3 import reconstructions\nelif int(sys.argv[3])==1:\n from reconstructions_FMC import reconstructions\nelse:\n from reconstructions_3x3 import reconstructions\n \nrecon = reconstructions[int(sys.argv[1])]\ndraw_func = \"DrawCube\" if int(sys.argv[2]) else \"DrawSimpleCube\"\ntitle, scr, steps, sol = split_reconstruction(recon)\n\ncube_state = get_cube_state('solved')\n\n# apply the moves in the scramble\nfor move in scr.split():\n if move == '':\n pass\n cube_state = [cube_state[i] for i in permutations[move]]\n\n# set the cube state\nprint('\\\\xdef\\\\myarray{{\"' + '\",\"'.join(cube_state) + '\"}}')\n\n# do the reconstruction\nprint(box_to_latex(title, scr, steps, sol, draw_func))\n","repo_name":"martintufte/rubiks_cube","sub_path":"code/print_box.py","file_name":"print_box.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"16451327698","text":"import torch\r\nimport copy\r\nimport net_plotter\r\nimport h5py\r\nimport numpy as np\r\nimport scheduler\r\nimport time\r\nimport torch.nn as nn\r\nfrom torch.autograd.variable import Variable\r\nimport torch.nn.functional as F\r\nimport argparse\r\nimport utils_\r\nimport torchvision.datasets as datasets\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom matplotlib import cm\r\n\r\ndef name_surface_file(args, dir_file):\r\n # use args.dir_file as the perfix\r\n surf_file = dir_file\r\n\r\n # resolution\r\n surf_file += '_[%s,%s,%d]' % (str(args.xmin), str(args.xmax), int(args.xnum))\r\n surf_file += 'x[%s,%s,%d]' % (str(args.ymin), str(args.ymax), int(args.ynum))\r\n\r\n return surf_file + \".h5\"\r\n\r\ndef setup_surface_file(args, surf_file, dir_file):\r\n try:\r\n f = h5py.File(surf_file, 'a')\r\n f['dir_file'] = dir_file\r\n \r\n # Create the coordinates(resolutions) at which the function is evaluated\r\n xcoordinates = np.linspace(args.xmin, args.xmax, num=int(args.xnum))\r\n f['xcoordinates'] = xcoordinates\r\n \r\n ycoordinates = np.linspace(args.ymin, args.ymax, num=int(args.ynum))\r\n f['ycoordinates'] = ycoordinates\r\n f.close()\r\n except:\r\n pass\r\n\r\n return surf_file\r\n\r\ndef eval_loss(net, criterion, loader, use_cuda=False):\r\n correct = 0\r\n total_loss = 0\r\n total = 0 # number of samples\r\n\r\n if use_cuda:\r\n net.cuda()\r\n net.eval()\r\n\r\n with torch.no_grad():\r\n if isinstance(criterion, nn.CrossEntropyLoss):\r\n for batch_idx, (inputs, targets) in enumerate(loader):\r\n batch_size = inputs.size(0)\r\n total += batch_size\r\n if use_cuda:\r\n inputs, targets = inputs.cuda(), targets.cuda()\r\n outputs = net(inputs)[0]\r\n loss = criterion(outputs, targets)\r\n total_loss += loss.item()*batch_size\r\n _, predicted = torch.max(outputs.data, 1)\r\n correct += predicted.eq(targets).sum().item()\r\n\r\n elif isinstance(criterion, nn.MSELoss):\r\n for batch_idx, (inputs, targets) in enumerate(loader):\r\n batch_size = inputs.size(0)\r\n total += batch_size\r\n inputs = Variable(inputs)\r\n\r\n one_hot_targets = torch.FloatTensor(batch_size, 10).zero_()\r\n one_hot_targets = one_hot_targets.scatter_(1, targets.view(batch_size, 1), 1.0)\r\n one_hot_targets = one_hot_targets.float()\r\n one_hot_targets = Variable(one_hot_targets)\r\n if use_cuda:\r\n inputs, one_hot_targets = inputs.cuda(), one_hot_targets.cuda()\r\n outputs = F.softmax(net(inputs))\r\n loss = criterion(outputs, one_hot_targets)\r\n total_loss += loss.item()*batch_size\r\n _, predicted = torch.max(outputs.data, 1)\r\n correct += predicted.cpu().eq(targets).sum().item()\r\n\r\n return total_loss/total, 100.*correct/total\r\n\r\n\r\ndef crunch(surf_file, net, w, s, d, dataloader, loss_key, acc_key, args):\r\n\r\n f = h5py.File(surf_file, 'r+')\r\n losses, accuracies = [], []\r\n xcoordinates = f['xcoordinates'][:]\r\n ycoordinates = f['ycoordinates'][:]\r\n\r\n if loss_key not in f.keys():\r\n shape = (len(xcoordinates),len(ycoordinates))\r\n losses = -np.ones(shape=shape)\r\n accuracies = -np.ones(shape=shape)\r\n f[loss_key] = losses\r\n f[acc_key] = accuracies\r\n \r\n else:\r\n losses = f[loss_key][:]\r\n accuracies = f[acc_key][:]\r\n\r\n # Generate a list of indices of 'losses' that need to be filled in.\r\n # The coordinates of each unfilled index (with respect to the direction vectors\r\n # stored in 'd') are stored in 'coords'.\r\n inds, coords, inds_nums = 
scheduler.get_job_indices(losses, xcoordinates, ycoordinates)\r\n\r\n print('Computing %d values'% (len(inds)))\r\n start_time = time.time()\r\n total_sync = 0.0\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n # if args.loss_name == 'mse':\r\n # criterion = nn.MSELoss()\r\n\r\n # Loop over all uncalculated loss values\r\n for count, ind in enumerate(inds):\r\n # Get the coordinates of the loss value being calculated\r\n coord = coords[count]\r\n\r\n # Load the weights corresponding to those coordinates into the net\r\n\r\n net_plotter.set_weights(net, w, d, coord)\r\n\r\n\r\n # Record the time to compute the loss value\r\n loss_start = time.time()\r\n loss, acc = eval_loss(net, criterion, dataloader, args.cuda)\r\n loss_compute_time = time.time() - loss_start\r\n print(loss_compute_time)\r\n\r\n # Record the result in the local array\r\n losses.ravel()[ind] = loss\r\n accuracies.ravel()[ind] = acc\r\n\r\n f[loss_key][:] = losses\r\n f[acc_key][:] = accuracies\r\n print(accuracies)\r\n f.flush()\r\n\r\n f.close()\r\n \r\ndef plot_2d_contour(surf_file, surf_name='valid_acc', vmin=0.1, vmax=25, vlevel=1.0, show=False):\r\n \"\"\"Plot 2D contour map and 3D surface.\"\"\"\r\n\r\n f = h5py.File(surf_file, 'r')\r\n x = np.array(f['xcoordinates'][:])\r\n y = np.array(f['ycoordinates'][:])\r\n X, Y = np.meshgrid(x, y)\r\n\r\n if surf_name in f.keys():\r\n Z = np.array(f[surf_name][:])\r\n elif surf_name == 'valid_err':\r\n Z = 100 - np.array(f['valid_acc'][:])\r\n elif surf_name == 'train_err' or surf_name == 'valid_err' :\r\n Z = 100 - np.array(f[surf_name][:])\r\n else:\r\n print ('%s is not found in %s' % (surf_name, surf_file))\r\n\r\n print('------------------------------------------------------------------')\r\n print('plot_2d_contour')\r\n print('------------------------------------------------------------------')\r\n print(\"loading surface file: \" + surf_file)\r\n print('len(xcoordinates): %d len(ycoordinates): %d' % (len(x), len(y)))\r\n print('max(%s) = %f \\t min(%s) = %f' % (surf_name, np.max(Z), surf_name, np.min(Z)))\r\n print(Z)\r\n\r\n if (len(x) <= 1 or len(y) <= 1):\r\n print('The length of coordinates is not enough for plotting contours')\r\n return\r\n\r\n # --------------------------------------------------------------------\r\n # Plot 2D contours\r\n # --------------------------------------------------------------------\r\n fig = plt.figure()\r\n CS = plt.contour(X, Y, Z, cmap='viridis', levels=np.arange(vmin, vmax, vlevel))\r\n plt.clabel(CS, inline=1, fontsize=8)\r\n fig.savefig(surf_file + '_' + surf_name + '_2dcontour' + '.pdf', dpi=300,\r\n bbox_inches='tight', format='pdf')\r\n\r\n fig = plt.figure()\r\n print(surf_file + '_' + surf_name + '_2dcontourf' + '.pdf')\r\n CS = plt.contourf(X, Y, Z, cmap='viridis', levels=np.arange(vmin, vmax, vlevel))\r\n fig.savefig(surf_file + '_' + surf_name + '_2dcontourf' + '.pdf', dpi=300,\r\n bbox_inches='tight', format='pdf')\r\n\r\n # --------------------------------------------------------------------\r\n # Plot 2D heatmaps\r\n # --------------------------------------------------------------------\r\n # fig = plt.figure()\r\n # sns_plot = sns.heatmap(Z, cmap='viridis', cbar=True, vmin=vmin, vmax=vmax,\r\n # xticklabels=False, yticklabels=False)\r\n # sns_plot.invert_yaxis()\r\n # sns_plot.get_figure().savefig(surf_file + '_' + surf_name + '_2dheat.pdf',\r\n # dpi=300, bbox_inches='tight', format='pdf')\r\n\r\n # --------------------------------------------------------------------\r\n # Plot 3D surface\r\n # 
--------------------------------------------------------------------\r\n fig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"})\r\n surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)\r\n ax.set_zlim(4.2, 8.0)\r\n ax.set_xlabel(\"$l_{1}$\")\r\n ax.set_ylabel(\"$l_{2}$\")\r\n ax.set_zlabel(\"Valid error\")\r\n # fig.colorbar(surf, shrink=0.5, aspect=5)\r\n fig.savefig(surf_file + '_' + surf_name + '_3dsurface.pdf', dpi=300,\r\n bbox_inches='tight', format='pdf')\r\n\r\n f.close()\r\n if show: plt.show()\r\n\r\nparser = argparse.ArgumentParser('Cifar10')\r\nparser.add_argument('--model_path', type = str, default = 'Eval-Beta_DARTS-20Cells-SVHN-Run-22/best_model.pth')\r\nparser.add_argument('--batch_size', type = int, default = 256)\r\nparser.add_argument('--data', type = str, default = '../../data')\r\nparser.add_argument('--dir_file', type = str, default = None)\r\nparser.add_argument('--auto_augment', type = bool, default = False)\r\nparser.add_argument('--cutout', type = bool, default = False)\r\nparser.add_argument('--cutout_length', type = int, default = 0)\r\nparser.add_argument('--xmax', type = float, default = 0.3)\r\nparser.add_argument('--xmin', type = float, default = -0.3)\r\nparser.add_argument('--ymax', type = float, default = 0.3)\r\nparser.add_argument('--ymin', type = float, default = -0.3)\r\nparser.add_argument('--xnum', type = int, default = 10)\r\nparser.add_argument('--ynum', type = int, default = 10)\r\nparser.add_argument('--cuda', type = str, default = True)\r\nparser.add_argument('--gpu', type = int, default = 0)\r\nparser.add_argument('--dataset', type = str, default = 'SVHN', help=\"[SVHN, CIFAR10]\")\r\n\r\nargs,unparsed = parser.parse_known_args()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n torch.cuda.set_device(args.gpu)\r\n \r\n model = torch.load(args.model_path, map_location='cpu')\r\n w = net_plotter.get_weights(model)\r\n s = copy.deepcopy(model.state_dict())\r\n \r\n dir_file = net_plotter.name_direction_file(args)\r\n net_plotter.setup_direction(args, dir_file, model)\r\n \r\n d = net_plotter.load_directions(dir_file)\r\n \r\n train_transform, valid_transform = utils_._data_transforms_cifar10(args)\r\n \r\n if args.dataset == \"CIFAR10\":\r\n train_data = datasets.CIFAR10(root = args.data, train = True, download = True, transform = valid_transform)\r\n valid_data = datasets.CIFAR10(root = args.data, train = False, download = True, transform = valid_transform)\r\n else:\r\n args.data = args.data + '/SVHN'\r\n train_data = datasets.SVHN(root=args.data,\r\n transform=train_transform, split=\"train\",\r\n download=True)\r\n valid_data = datasets.SVHN(root=args.data,\r\n transform=valid_transform, split=\"test\", \r\n download=True)\r\n \r\n train_data.data = train_data.data[0:10000]\r\n train_data.labels = train_data.labels[0:10000]\r\n \r\n # train_queue = torch.utils.data.DataLoader(train_data, batch_size = args.batch_size, num_workers=4)\r\n valid_queue = torch.utils.data.DataLoader(valid_data, batch_size = args.batch_size, num_workers=4)\r\n \r\n surf_file = name_surface_file(args, dir_file)\r\n # setup_surface_file(args, surf_file, dir_file)\r\n \r\n # crunch(surf_file, model, w, s, d, valid_queue, 'valid_loss', 'valid_acc', args)\r\n plot_2d_contour(surf_file, surf_name='valid_err', vmin=4.2\r\n , vmax=10.0, vlevel=0.2, 
show=True)\r\n","repo_name":"RaoXuan-1998/NADD","sub_path":"plot_surface.py","file_name":"plot_surface.py","file_ext":"py","file_size_in_byte":11043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
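The .h5 surface file written by setup_surface_file() and crunch() above holds the two coordinate vectors plus one 2-D array per metric. A minimal sketch of reading one back; the file name below is a made-up example of the pattern produced by name_surface_file.

import h5py

with h5py.File('dirs.h5_[-0.3,0.3,10]x[-0.3,0.3,10].h5', 'r') as f:
    x = f['xcoordinates'][:]   # grid along the first direction
    y = f['ycoordinates'][:]   # grid along the second direction
    acc = f['valid_acc'][:]    # shape (len(x), len(y)); -1 marks unevaluated points
print(acc.shape)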
+{"seq_id":"44055967816","text":"#!/usr/bin/env python3\nimport sys, time, json, re, random\nfrom lttngAnalyses.networking.connection import connection\nfrom multiprocessing import Process as mp\n\ndef send_text_strings(filepath):\n\twith open(filepath, 'r') as trace_hist:\n\t\tpattern = re.compile('^\\{(.+)\\}')\n\t\ttry:\n\t\t\tfor line in trace_hist:\n\t\t\t\tline = pattern.match(line)\n\t\t\t\t#print(line)\n\t\t\t\tif line!=None:\n\t\t\t\t\tclient.send(json.loads(re.sub(\"'\",'\"',line.group(0))))\n\t\t\t\t\tprint(\"%s\\n\\n\" %(re.sub(\"'\",'\"',line.group(0))))\n\t\t\t\t\ttime.sleep(random.uniform(0.5,3))\n\t\texcept KeyboardInterrupt:\n\t\t\tprint(\"User interrupted\")\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) < 3:\n\t\traise TypeError(\"Usage: './proxy_clients.py trace_dump_file_1_path.txt trace_dump_file_2_path.txt'\")\n\tclient = connection('localhost', 5432)\n\ttry:\n\t\tclient.connect('localhost', 6666)\n\texcept ConnectionRefusedError:\n\t\tprint(\"No server found running at \"+ sys.argv[1] + \":6666'\")\n\t\tsys.exit()\n\texcept:\n\t\tprint(\"Failed to connect to server\")\n\t\traise\n\tpath1 = str(sys.argv[1])\n\tpath2 = str(sys.argv[2])\n\tproc1 = mp(target=send_text_strings, args=(path1,))\n\tproc2 = mp(target=send_text_strings, args=(path2,))\n\tproc1.start()\n\tproc2.start()\n\t\t","repo_name":"mehran-47/lttngAnalysesForOpenSAF","sub_path":"lttngAnalyses/networking/dummy_client_datastream.py","file_name":"dummy_client_datastream.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"10819729469","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import linregress\nimport uncertainties.unumpy as unp\nfrom uncertainties import ufloat\n\nR = 50\nL = 3.2351e-2\nC = 8.385e-10\ni=0\nx = [2.03, 3.00, 4.00, 5.02, 6.47, 8.00, 9.99]\ny = [4, 5, 5, 6, 8, 11, 14]\ny_1 = [3, 4, 6, 7, 8, 10, 13]\n\nwhile i<7:\n x[i] = ufloat(x[i], x[i]*0.003)\n i+=1\n\nm, n, a, b, c = linregress(unp.nominal_values(x), y)\nz = np.linspace(2, 10, 500)\nz_y = m*z+n\ndel a, b, c\nd, f, a, b, c = linregress(unp.nominal_values(x), y_1)\nz_y_1 = d*z+f\ndel a, b, c\n\n#plt.plot(z, z_y, '-b', label = \"Ausgleichsgerade Max\")\n#plt.plot(z, z_y_1, '-y', label= \"Ausgleichsgerade Min\")\nplt.errorbar(unp.nominal_values(x), y, xerr=unp.std_devs(x), fmt='r.', label='Maxima')\nplt.errorbar(unp.nominal_values(x), y_1, xerr=unp.std_devs(x), fmt='g.', label='Minima')\nplt.xlabel('C in nF')\nplt.ylabel('Anzahl Extrema')\nplt.legend()\n\n# in matplotlibrc leider (noch) nicht möglich\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('build/plot.pdf')\nplt.close()\n\nx1 = [1.01, 2.03, 3.00, 4.00, 5.02, 6.47, 8.00, 9.99]\ny1 = [47090, 39990, 37275, 35730, 34760, 33870, 33260, 32740]\ny2 = [30450, 30440, 30440, 30440, 30440, 30440, 30440, 30440]\nx4 = np.linspace(1, 10, 500)\nv2 = 1/(2*np.pi*np.sqrt(L/(1/C+2/(x4*10**(-9)))))\nv3 = 1/(2*np.pi*np.sqrt(L*C))*x4/x4\n\nplt.plot(x1, y1, 'r+', label=r\"$v^-$\")\nplt.plot(x1, y2, 'g+', label=r\"$v^+$\")\nplt.plot(x4, v2, 'b-', label=r\"Theoriewerte für $v^-$\")\nplt.plot(x4, v3, 'y-', label=r\"Theoriewerte für $v^+$\")\nplt.xlabel('C in nF')\nplt.ylabel('f in Hz')\nplt.legend(loc='best')\n\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('build/plot2.pdf')\nplt.close()\n\ni=0\nw1 = [24.962, 24.972, 24.978, 24.978, 24.984, 24.978, 24.978]\nw2 = [24.987, 24.928, 24.957, 24.965, 24.971, 24.967, 24.970]\nx1 = [2.03, 3.00, 4.00, 5.02, 6.47, 8.00, 9.99]\nplt.plot(x1, w1, '-y', label=r'$v^+$')\nplt.plot(x1, w2, '-r', label=r'$v^-$')\nplt.xlabel(r'$C_\\mathrm{k}$ in nF')\nplt.ylabel(r'$v$ in kHz')\nplt.legend(loc= 'best')\n\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('build/plot3.pdf')\n\nprint(1/(np.sqrt(3.2351e-2*8.015e-10)))\nprint(1/(np.sqrt(3.2351e-2*(8.015e-10+3.7e-11))))","repo_name":"rleven/richard_joell_Praktikum","sub_path":"Gekoppelte_Schwingungen[done]/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"9592295916","text":"# Функция, возвращающая список с делителями числа\ndef get_dividers(num):\n num = abs(num)\n div_list = []\n for div in range(- num, num + 1):\n if div != 0:\n if num % div == 0:\n div_list.append(div) # Заполняем список делителей\n return div_list\n\n\nprint(*(get_dividers(int(input('Введите число: ')))))\n\n","repo_name":"stopmosk/homework-2019","sub_path":"homework_1/dividers.py","file_name":"dividers.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"25071325174","text":"class Solution:\n def searchInsert(self, nums, target: int) -> int:\n beg = 0\n end = len(nums)-1\n while beg <= end:\n mid = (end + beg)//2\n if nums[mid] == target:\n return mid\n elif nums[mid] < target:\n beg = mid+1\n else:\n end = mid -1\n if beg > end:\n return beg\n return end\n\n\nnums = [1,3,5,6]\ntarget = 5\nsolution = Solution()\nprint(solution.searchInsert(nums, target))","repo_name":"ashima96/LeetCode-Solutions","sub_path":"Python/035.py","file_name":"035.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"11923379899","text":"import sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport argparse\nimport torch_pruning as tp\nimport time\n\n#超参\ndevice = torch.device('cuda:1')\nLR = 0.001\nEPOCH = 50\nBTACH_SIZE = 100\ntrain_root = '/home/xywang/code/pruning/catdog_classification/train'\nvaild_root = '/home/xywang/code/pruning/catdog_classification/test'\n\n\n#数据加载及处理\ntest_transform = transforms.Compose([\n transforms.Resize(224),\n transforms.RandomResizedCrop(224,scale=(1.0,1.0),ratio=(1.0,1.0)),\n # transforms.RandomResizedCrop(224,scale=(0.6,1.0),ratio=(0.8,1.0)),\n # transforms.RandomHorizontalFlip(),\n # torchvision.transforms.ColorJitter(brightness=0.5, contrast=0, saturation=0, hue=0),\n # torchvision.transforms.ColorJitter(brightness=0, contrast=0.5, saturation=0, hue=0),\n transforms.ToTensor(),\n transforms.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])\n])\n\nvaild_data = torchvision.datasets.ImageFolder(\n root=vaild_root,\n transform=test_transform\n )\n\ntest_set = torch.utils.data.DataLoader(\n vaild_data,\n batch_size=BTACH_SIZE,\n shuffle=False\n)\n\ndef updateBN(model, s ,pruning_modules):\n for module in pruning_modules:\n module.weight.grad.data.add_(s * torch.sign(module.weight.data))\n \n#训练和验证\ncriteration = nn.CrossEntropyLoss()\n\ndef vaild(model,device,dataset):\n model.eval().to(device)\n correct = 0\n with torch.no_grad():\n for i,(x,y) in tqdm(enumerate(dataset)):\n x,y = x.to(device) ,y.to(device)\n output = model(x)\n loss = criteration(output,y)\n pred = output.max(1,keepdim=True)[1]\n correct += pred.eq(y.view_as(pred)).sum().item()\n print(\"Test Loss {:.4f} Accuracy {}/{} ({:.3f}%)\".format(loss,correct,len(dataset)*BTACH_SIZE,100*correct/(len(dataset)*BTACH_SIZE)))\n return 100*correct/(len(dataset)*BTACH_SIZE)\n\ndef get_pruning_modules(model):\n data = []\n for layer in model.named_modules():\n if \"bn\" in layer[0]:\n data.append(layer[1])\n return data\n\nmodel = torch.load('models/model_pruned_unknow.pth')\nprint(model)\nprec2 = vaild(model,device,test_set)\n\nrandom_input = torch.rand((16, 3, 224, 224)).to(device)\ndef test_speed(input, model, repeat=200):\n model.eval()\n start = time.time()\n with torch.no_grad():\n for i in range(repeat):\n output = model(input)\n avg_infer_time = (time.time() - start) / repeat\n\n return avg_infer_time\n\n\nmodel_original = torchvision.models.resnet50()\nmodel_original.fc = nn.Sequential(\n nn.Linear(2048,2)\n )\nmodel_original.to(device)\nmodel_original.load_state_dict(torch.load('models/model_pruning.pth'))\n\nobtain_num_parameters = lambda model:sum([param.nelement() for param in model.parameters()])\n\nprec1 = vaild(model_original,device,test_set)\ninference_time = test_speed(random_input, model_original, repeat=100)\nparameters_num = obtain_num_parameters(model_original)\n\npruning_inference_time = test_speed(random_input, model, repeat=100)\npruning_parameters_num = obtain_num_parameters(model)\n\nprint(\"original accuracy: \", prec1)\nprint(\"original parameters_num: \",parameters_num)\nprint(\"original inference time: \"+str(inference_time))\n\nprint(\"pruning accuracy: \", prec2)\nprint(\"pruning parameters_num: 
\",pruning_parameters_num)\nprint(\"pruning inference time: \"+str(pruning_inference_time))\n","repo_name":"wxy1234567/Resnet50-pruning","sub_path":"resnet50_catdog/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"28"}
+{"seq_id":"19237078248","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# =============================================================================\n# Created By : Simon Schaefer\n# Description : Compare psnr and runtime of models.\n# Arguments : Filter (format=\"key1:value1&key2:value2...\")\n# =============================================================================\nimport argparse\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.optimize import curve_fit\n\nfrom utils import *\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nparser = argparse.ArgumentParser(description=\"psnr_time\")\nparser.add_argument(\"--directory\", type=str, default=\"\")\nparser.add_argument(\"--psnr_tag\", type=str, default=\"SHRT_mean\",\n choices=(\"SHRT_mean\", \"SCOLT_mean\",\n \"SHRT_best\", \"SCOLT_best\"))\nparser.add_argument(\"--filter\", type=str, default=\"\")\nparser.add_argument(\"--emphasize_models\", type=str, default=\"\",\n help=\"format = modela:modelb...\")\nparser.add_argument(\"--no_emphasize\", action=\"store_true\")\nargs = parser.parse_args()\n\nprint(\"Scrapping outs data ...\")\npsnr_tag = \"PSNR_{}\".format(args.psnr_tag)\ndir = os.path.join(os.environ[\"SR_PROJECT_OUTS_PATH\"], args.directory)\ndata = scrap_outputs(directory=dir)\n\nprint(\"... add baseline results\")\ndata = add_baseline_results(data)\n\nprint(\"... filtering outs data\")\nfor key_value in args.filter.split(\"&\"):\n if key_value == \"\": continue\n key, value = key_value.split(\":\")\n if len(value.split(\"/\")) > 0:\n data = data[data[key].isin(value.split(\"/\"))]\n else:\n data = data[data[key] == value]\n\nprint(\"... averaging over models\")\ndata = average_key_over_key(data, psnr_tag, \"model\", \"dataset\")\n\nprint(\"... plotting psnr boxplot plots\")\nf, axes = plt.subplots(figsize=(8,8))\nsns.violinplot(x=\"model\",y=psnr_tag,data=data, orient='v')\nplt.ylabel(\"PSNR [dB]\")\nplt.xticks(rotation=20)\nplt.savefig(save_path(\"pnsr_boxplot.png\"))\nplt.close()\n\n# print(\"... regressing non-linear function\")\n# def func(x, a, b, c, d):\n# return a + b*x + c*np.exp(d*x)\n\nprint(\"... 
plotting complexity-psnr-correlation plot\")\nspecial_models = args.emphasize_models.split(\":\")\nunique_datasets = np.unique(data[\"dataset\"])\nnum_unique_dsets = len(unique_datasets)\nf, axes = plt.subplots(1, num_unique_dsets, figsize=(8*num_unique_dsets,8))\nfor id, dataset in enumerate(unique_datasets):\n xs, ys = [], []\n # Plot actual data.\n for index, row in data.iterrows():\n if not row[\"dataset\"] == dataset: continue\n x, y = row[\"complexity\"], row[\"{}_model_dataset_avg\".format(psnr_tag)]\n axes[id].scatter(x, y, marker='x')\n if args.no_emphasize:\n font = {'color': 'black', 'weight': 'ultralight', 'size': 8}\n else:\n if row[\"model\"] == \"AETAD_COLOR\": row[\"model\"] = \"Kim et al.\" #retrained as not given\n if row[\"model\"] == \"Kim et al.\":\n font = {'color': 'red', 'weight': 'bold', 'size': 10,\n 'horizontalalignment': 'right'}\n elif row[\"model\"] in special_models:\n font = {'color': 'green', 'weight': 'bold', 'size': 10}\n elif row[\"model\"] in \"no_tad\":\n font = {'color': 'orange', 'weight': 'bold', 'size': 10}\n else:\n font = {'color': 'black', 'weight': 'ultralight', 'size': 8}\n axes[id].text(x+.03, y+.03, row[\"model\"], fontdict=font)\n xs.append(x/100000); ys.append(y)\n # Plot non-linear regression.\n # guess_params = [120, 100, -10, -0.01]\n # popt, _ = curve_fit(func, xs, ys, guess_params)\n # xs_plot = (np.linspace(np.min(xs), np.max(xs), num=20)*100000).tolist()\n # print(popt)\n # print(\"-\"*20)\n # ys_plot = [func(x/100000, *popt) for x in xs_plot]\n # axes[id].plot(xs_plot, ys_plot, '--')\n axes[id].set_title(dataset)\n axes[id].set_ylabel(\"PSNR [dB]\")\n axes[id].set_xlabel(\"# Model parameters\")\nplt.savefig(save_path(\"psnr_complexity_linear.png\"))\nplt.close()\n","repo_name":"simon-schaefer/tar","sub_path":"src/evaluation/compare_psnr_complexity.py","file_name":"compare_psnr_complexity.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"28"}
+{"seq_id":"13418960712","text":"# Strings Continued\n\n# 9.1\n# Traversal and the for loop, by item\nfor aName in ['Joe', 'Amy', 'Brad', 'etc']:\n print('Hi ' + aName + '. Please come to my party Sat!')\n\n# the loop variable takes on each value in the sequence.\nfor aValue in range(10): # same is true with range\n print(aValue)\n# This will traverse a string and print ever character\nfor aChar in \"Go Spot Go\":\n print(aChar)\n# G \\n o \\n \\n S \\n p \\n o \\n t ...\n\n# 9.2\n# Traversal and the for loop; by index\nfruit = 'apple'\nfor idx in range(len(fruit)):\n print(fruit[idx])\n\n# 9.3\n# Traversal and the while loop\nfruit = 'apple'\nposition = 0\nwhile position < len(fruit):\n print(fruit[position])\n position += 1\n\n# 9.4\n# The in and not in operators\nprint('p' in 'apple') # True\nprint('i' in 'apple') # False\nprint('ap' in 'apple') # True\nprint('pa' in 'apple') # False\nprint('' in 'apple') # True\nprint('x' not in 'apple') # True\n\n# 9.5\n# The Accumulator Pattern with Strings\ndef removeVowels(s):\n vowels = 'aeiouAEIOU'\n sWithoutVowels = \"\"\n for eachChar in s:\n if eachChar not in vowels:\n sWithoutVowels += eachChar\n return sWithoutVowels\n\nprint(removeVowels('compsci'))\nprint('aAxEeyIizOopUuq')\n\n# 9.6\n# Looping and Counting\ndef count(text, aChar):\n lettercount = 0\n for c in text:\n if c == aChar:\n lettercount += 1\n return lettercount\nprint(count('banana', 'a'))\n# how many a's are in banana?\n\nprint('\\n\\n\\n')\n# 9.7\n# A Find Function / Method\ndef find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1\n\nprint(find(\"compsci\", 'p'))\nprint(find(\"Compsci\", 'C'))\nprint(find(\"Compsci\", 'i'))\nprint(find(\"Compsci\", 'x'))\n# finds the index location of the in question\n\n# 9.8\n# Optional Parameters\ndef find4(astring, achar, start=0, end=None):\n \"\"\"\n Find and return the index of achar in astring.\n Return -1 if achar does not occur in astring.\n \"\"\"\n ix = start\n if end == None:\n end = len(astring)\n\n found = False\n while ix < end and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1\n\n# 9.9\n# Character Classification\nimport string\nprint(string.ascii_lowercase)\nprint(string.ascii_uppercase)\nprint(string.digits)\nprint(string.punctuation)\nprint(string.whitespace)\n\n# 9.10\n# Summary\nindexing = 'this'\nprint(indexing[2]) # i\n# length function (len)\n# for loop traversal\nfor i in 'Example':\n print(i, end=' - ')\n print('\\n')\n# Slicing [:]\n# String Comparison ( >, <, >=, <=, ==, !=)\n# In and Not In operators\nif 'heck' in 'I\\'ll be checking for you':\n print('True')\nif 'heck' not in 'I\\'ll be checking for you':\n print('False') # it is so this won't print\n","repo_name":"sbburton/cs50","sub_path":"old-files/LC101/ch9Notes.py","file_name":"ch9Notes.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"36132381903","text":"import cv2\n\nfrom test_frame_annotations import get_video_labels, write_annotations\n\nimport logging\nfrom util import setup_cli_logging\n\n\ndef video_output():\n VIDEO_CODEC = \"mp4v\"\n fps = 59.94\n width = 1280\n height = 720\n\n out = cv2.VideoWriter(\"tmp/ann_out_test.mp4\", cv2.VideoWriter_fourcc(\n *VIDEO_CODEC), fps, (width, height))\n\n capture = cv2.VideoCapture(\"tmp/026c7465-309f6d33.mp4\")\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n video_labels = get_video_labels()\n\n for frame in range(frame_count):\n ret, img = capture.read()\n if ret == False:\n break\n img = write_annotations(img, frame, video_labels)\n out.write(img)\n\n out.release()\n capture.release()\n\n logging.info(\"Annotated video test saved to tmp/ann_out_test\")\n\n\nif __name__ == \"__main__\":\n setup_cli_logging()\n video_output()\n","repo_name":"jeaguil/motgocr","sub_path":"tests/test_video_output.py","file_name":"test_video_output.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20545954317","text":"from functools import partial\n\nimport numpy as np\nimport scipy.optimize as opt\nfrom scipy.integrate import quad as _integrate_\nfrom scipy.special import gamma as gamma_f\nfrom scipy.stats import cauchy, norm\n\nfrom copulae.special.trig import cospi2, tanpi2\nfrom copulae.types import Numeric\n\n__all__ = ['skew_stable']\n\nLARGE_EXP_POWER = 708.396418532264 # exp(LEP) == inf. Effective a value for which the exponent is equivalent to inf\nPI2 = np.pi / 2\n\n\nclass skew_stable:\n \"\"\"\n Skew Stable Distribution\n\n The function uses the approach of J.P. Nolan for general stable distributions. Nolan (1997) derived expressions \n in form of integrals based on the characteristic function for standardized stable random variables. For \n probability density and cumulative probability density, these integrals are numerically evaluated using scipy's \n integrate() function. \n\n \"S0\" parameterization [pm=0]: based on the (M) representation of Zolotarev for an alpha stable distribution with\n skewness beta. Unlike the Zolotarev (M) parameterization, gamma and delta are straightforward scale and shift\n parameters. This representation is continuous in all 4 parameters, and gives an intuitive meaning to gamma and\n delta that is lacking in other parameterizations. Switching the sign of beta mirrors the distribution at the\n vertical axis x = delta, i.e.,\n\n f(x, α, -β, γ, δ, 0) = f(2δ-x, α, +β, γ, δ, 0),\n\n \"S\" or \"S1\" parameterization [pm=1]: the parameterization used by Samorodnitsky and Taqqu in the book\n Stable Non-Gaussian Random Processes. It is a slight modification of Zolotarev's (A) parameterization.\n\n \"S*\" or \"S2\" parameterization [pm=2]: a modification of the S0 parameterization which is defined so that (i) the\n scale gamma agrees with the Gaussian scale (standard dev.) when alpha=2 and the Cauchy scale when alpha=1, (ii)\n the mode is exactly at delta. 
For this parametrization, stableMode(alpha,beta) is needed.\n \"\"\"\n\n @classmethod\n def pdf(cls, x: Numeric, alpha: float, beta: float, gamma=1., delta=0., pm=0):\n cls._check_parameters(alpha, beta, gamma, pm)\n delta, gamma = cls._form_parameters(alpha, delta, gamma, beta, pm)\n\n x = (x - delta) / gamma\n\n if alpha == 2:\n ans = norm.pdf(x, 0, np.sqrt(2))\n if np.array(ans).size == 1:\n ans = np.ravel([ans])\n\n elif alpha == 1 and beta == 0:\n ans = cauchy.pdf(x)\n if np.array(ans).size == 1:\n ans = np.ravel([ans])\n\n elif alpha == 1: # beta not 0\n if isinstance(x, (float, int)):\n ans = np.array([_pdf_f2(x, beta)])\n else:\n ans = np.array([_pdf_f2(e, beta) for e in x])\n else: # alpha != 1\n bt = beta * tanpi2(alpha)\n zeta = -bt\n theta0 = min(max(-PI2, np.arctan(bt) / alpha), PI2)\n\n if bt == 0:\n zeta_tol = 4e-10\n elif 1 - abs(beta) < 0.01 or alpha < 0.01:\n zeta_tol = 2e-9\n else:\n zeta_tol = 5e-5\n if isinstance(x, (float, int)):\n ans = np.array([_pdf_f1(x, zeta, alpha, beta, theta0, zeta_tol)])\n else:\n ans = np.array([_pdf_f1(e, zeta, alpha, beta, theta0, zeta_tol) for e in x])\n\n # patch points where the density underflowed to 0 with the Pareto tail approximation\n infs = ans == 0\n if np.any(infs):\n d = cls._pareto_pdf(x, alpha, beta)\n ans[infs] = d[infs] / gamma\n\n if np.any(~infs):\n ans[~infs] /= gamma\n\n return float(ans) if ans.size == 1 else ans\n\n @classmethod\n def logpdf(cls, x: Numeric, alpha: float, beta: float, gamma=1., delta=0., pm=0):\n return np.log(cls.pdf(x, alpha, beta, gamma, delta, pm))\n\n @classmethod\n def rvs(cls, alpha: float, beta: float, gamma=1., delta=0., pm=1, size: Numeric = 1):\n cls._check_parameters(alpha, beta, gamma, pm)\n delta, gamma = cls._form_parameters(alpha, delta, gamma, beta, pm)\n\n if np.isclose(alpha, 1) and np.isclose(beta, 0):\n z = cauchy.rvs(size=size)\n else:\n theta = np.pi * (np.random.uniform(size=size) - 0.5)\n w = np.random.standard_exponential(size)\n\n bt = beta * tanpi2(alpha)\n t0 = min(max(-PI2, np.arctan(bt) / alpha), PI2)\n at = alpha * (theta + t0)\n\n c = (1 + bt ** 2) ** (1 / (2 * alpha))\n\n z = c * np.sin(at) \\\n * (np.cos(theta - at) / w) ** (1 / alpha - 1) \\\n / np.cos(theta) ** (1 / alpha) \\\n - bt\n\n return z * gamma + delta\n\n @classmethod\n def _check_parameters(cls, alpha: float, beta: float, gamma=1., pm=1):\n if pm < 0 or pm > 2:\n raise ValueError(\"parametrization must be an integer in [0, 1, 2]\")\n if abs(beta) > 1:\n raise ValueError(\"beta must be between [-1, 1]\")\n if alpha <= 0 or alpha > 2:\n raise ValueError(\"alpha must be between (0, 2]\")\n if gamma < 0:\n raise ValueError(\"gamma must be >= 0\")\n\n @classmethod\n def _form_parameters(cls, alpha, delta: float, gamma: float, beta: float, pm=0):\n if pm == 1:\n delta += beta * gamma * _omega(gamma, alpha)\n elif pm == 2:\n gamma *= alpha ** (-1 / alpha)\n delta -= gamma * cls._mode(alpha, beta)\n\n return delta, gamma\n\n @classmethod\n def _mode(cls, alpha: float, beta: float, beta_max=1 - 1e-11):\n cls._check_parameters(alpha, beta)\n\n if alpha * beta == 0:\n return 0\n\n beta = np.sign(beta) * min(abs(beta), beta_max) # cap |beta| just below 1 for numerical stability\n bounds = sorted([0, np.sign(beta) * -0.7])\n\n return float(opt.minimize_scalar(lambda x: -cls.pdf(x, alpha, beta), bounds=bounds)['x'])\n\n @classmethod\n def _pareto_pdf(cls, x: Numeric, alpha, beta, log=False):\n \"\"\"Tail approximation density for stable pdf\"\"\"\n x = np.asarray(x)\n if x.ndim == 0:\n x = x.reshape(1)\n\n neg = x < 0\n if np.any(neg):\n x[neg] *= -1\n beta = np.repeat(beta, len(x))\n beta[neg] *= -1\n\n if log:\n return np.log(alpha) + np.log1p(beta) + cls._stable_tail(alpha, 
log=True) - (1 + alpha) * np.log(x)\n else:\n return alpha * (1 + beta) * cls._stable_tail(alpha) * x ** (-1 - alpha)\n\n @staticmethod\n def _stable_tail(alpha, log=False):\n if alpha == 0:\n return -np.log(2) if log else 0.5\n elif alpha == 2:\n return -np.inf if log else 0\n else:\n r = gamma_f(alpha) / np.pi * np.sin(alpha * PI2)\n return np.log(r) if log else r\n\n\ndef _omega(gamma: float, alpha: float):\n if not alpha.is_integer():\n return tanpi2(alpha)\n elif alpha == 1:\n return 2 / np.pi * np.log(gamma)\n else:\n return 0\n\n\ndef _pdf_f1(x: float, zeta: float, alpha: float, beta: float, theta0: float, zeta_tol=1e-16):\n \"\"\"\n Helper function to derive probability density at point 'x'\n\n Parameters\n ----------\n x: float\n numeric\n\n zeta: float\n numeric bound between (-inf, inf). infinity when alpha -> 1\n\n alpha: float\n bound between [0, 2]\n\n beta: float\n bound between [-1, 1]\n\n theta0: float\n bound between [-pi / 2, pi / 2]\n\n zeta_tol: float\n zeta tolerance\n Returns\n -------\n float\n numeric\n \"\"\"\n x_m_zeta = abs(x - zeta)\n\n if np.isfinite(x) and (x_m_zeta <= zeta_tol * (zeta_tol + max(abs(x), abs(zeta)))):\n return gamma_f(1 + 1 / alpha) * np.cos(theta0) / (np.pi * (1 + zeta ** 2) ** (1 / (2 * alpha)))\n\n small_alpha = alpha < 1e-17\n\n if x < zeta:\n theta0 = -theta0\n\n if small_alpha:\n beta = -beta\n x = -x\n\n if small_alpha:\n return 0 if alpha == 0 else np.exp(np.log(alpha) + np.log1p(beta) - (1 + np.log(2 * x + np.pi * alpha * beta)))\n\n pg_f1 = partial(_g_th1, alpha=alpha, theta0=theta0, x_m_zeta=x_m_zeta)\n\n g_pi = pg_f1(PI2)\n g_t0 = pg_f1(-theta0)\n\n if (alpha >= 1 and ((not np.isnan(g_pi) and g_pi > LARGE_EXP_POWER) or np.isclose(g_t0, 0))) or \\\n (alpha < 1 and ((not np.isnan(g_t0) and g_pi > LARGE_EXP_POWER) or np.isclose(g_pi, 0))):\n return 0\n\n g_pi = pg_f1(_e_plus(-theta0, 1e-6)) if alpha >= 1 else pg_f1(PI2 * (1 - 1e-6))\n\n if np.isnan(g_pi) and max(x_m_zeta, x_m_zeta / abs(x)) < 0.01:\n return gamma_f(1 + 1 / alpha) * np.cos(theta0) / (np.pi * (1 + zeta ** 2) ** (1 / (2 * alpha)))\n\n pg_f2 = partial(_g_th2, alpha=alpha, theta0=theta0, x_m_zeta=x_m_zeta)\n\n if not np.isnan(g_pi) and ((alpha >= 1 and g_pi > 1) or (alpha < 1 and g_pi < 1)):\n theta2 = PI2 * (1 - 1e-6)\n g2_th2 = pg_f2(theta2)\n elif (alpha < 1 < g_t0) or (alpha >= 1 > g_t0):\n theta2 = _e_plus(-theta0, 1e-6)\n g2_th2 = pg_f2(theta2)\n else:\n l_th, u_th = -theta0, PI2\n\n if alpha < 1:\n while True:\n _th = (l_th + PI2) / 2\n g_th = pg_f1(_th)\n if g_th != 0:\n break\n l_th = _th\n\n if g_th == 1:\n while True:\n _th = (l_th + u_th) / 2\n g_th = pg_f1(_th)\n if g_th != 1:\n break\n u_th = _th\n if np.isclose(u_th - l_th, 0):\n return 0\n\n ur1 = opt.bisect(lambda xx: pg_f1(xx) - 1, l_th, u_th)\n g_1 = _exp(ur1 + 1)\n try:\n ur2 = opt.bisect(lambda xx: np.log(pg_f1(xx)), l_th, u_th)\n g_2 = _exp(np.exp(ur2))\n except ValueError:\n ur2 = np.inf\n g_2 = -np.inf\n\n if g_1 >= g_2:\n theta2 = ur1\n g2_th2 = g_1\n else:\n theta2 = ur2 # this will never be infinity\n g2_th2 = g_2\n\n eps = 1e-4\n\n do1 = g2_th2 > eps > pg_f2(-theta0)\n\n _INT = lambda a, b: _integrate(pg_f2, a, b)\n\n if do1:\n th1 = opt.bisect(lambda xx: pg_f2(xx) - eps, -theta0, theta2)\n r1 = _INT(-theta0, th1)\n r2 = _INT(th1, theta2)\n else:\n r1 = 0\n r2 = _INT(-theta0, theta2)\n\n do4 = g2_th2 > eps > pg_f2(PI2)\n if do4:\n th3 = opt.bisect(lambda xx: pg_f2(xx) - eps, theta2, PI2)\n r3 = _INT(theta2, th3)\n r4 = _INT(th3, PI2)\n else:\n r3 = _INT(theta2, PI2)\n r4 = 0\n\n return (alpha / 
(np.pi * abs(alpha - 1) * x_m_zeta)) * (r1 + r2 + r3 + r4)\n\n\ndef _pdf_f2(x: float, beta: float):\n i2b = 1 / (2 * beta)\n p2b = np.pi * i2b\n ea = -p2b * x\n\n if np.isinf(ea):\n return 0\n\n ur: float = opt.bisect(lambda u: _g_u1(u, x, beta) - 1, -1, 1)\n\n # bind x and beta so the quadrature routine only varies u\n r1 = _integrate(lambda u: _g_u2(u, x, beta), -1, ur)\n r2 = _integrate(lambda u: _g_u2(u, x, beta), ur, 1)\n\n return PI2 * abs(i2b) * (r1 + r2)\n\n\ndef _g_th1(th: float, alpha: float, theta0: float, x_m_zeta: float):\n \"\"\"Helper calculation function\"\"\"\n\n if np.isclose(PI2 - np.sign(alpha - 1) * th, 0):\n return 0\n at0 = alpha * theta0\n att = at0 + alpha * th\n return np.cos(att - th) * (np.cos(at0) * np.cos(th) * ((x_m_zeta / np.sin(att)) ** alpha)) ** (1 / (alpha - 1))\n\n\ndef _g_th2(th: float, alpha: float, theta0: float, x_m_zeta: float):\n \"\"\"Helper calculation function\"\"\"\n return _exp(_g_th1(th, alpha, theta0, x_m_zeta))\n\n\ndef _g_u1(u, x, beta):\n \"\"\"Helper calculation function\"\"\"\n if abs(u + np.sign(beta)) < 1e-10:\n return 0\n\n p2b = np.pi / (2 * beta)\n ea = -p2b * x\n th = u * PI2\n\n h = p2b + th\n return h / p2b * np.exp(ea + h * tanpi2(u)) / cospi2(u)\n\n\ndef _g_u2(u, x, beta):\n \"\"\"Helper calculation function\"\"\"\n return _exp(_g_u1(u, x, beta))\n\n\ndef _e_minus(x: float, eps: float):\n \"\"\"Helper calculation function\"\"\"\n return x - eps * abs(x)\n\n\ndef _e_plus(x: float, eps: float):\n \"\"\"Helper calculation function\"\"\"\n return x + eps * abs(x)\n\n\ndef _exp(x: float):\n \"\"\"Helper calculation function\"\"\"\n r = x * np.exp(-x)\n return r if x < LARGE_EXP_POWER else 0\n\n\ndef _integrate(f, lower: float, upper: float):\n \"\"\"Helper integration function\"\"\"\n return _integrate_(f, lower, upper, limit=1000)[0]\n","repo_name":"chrisburr/copulae","sub_path":"copulae/stats/_stable.py","file_name":"_stable.py","file_ext":"py","file_size_in_byte":12202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"28"}
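A brief, hypothetical usage sketch for the class above; the parameter values are arbitrary, and pm selects between the parameterizations described in the docstring (S1 is rvs's default, S0 is used here for pdf):

import numpy as np

np.random.seed(0)
samples = skew_stable.rvs(alpha=1.7, beta=0.5, gamma=1., delta=0., pm=1, size=5)
density = skew_stable.pdf(0.3, alpha=1.7, beta=0.5, gamma=1., delta=0., pm=0)
print(samples)
print(density)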
+{"seq_id":"34922624430","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\n# from testenv.home.models import Farm, Farmer, Plot, ProjectManager, RegionalManager, Salesman, TechSupport\nfrom rest_framework import exceptions\nfrom django.core.exceptions import PermissionDenied\nfrom testenv.home.models import CustomUser, Farm,Plot\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = CustomUser\n fields = ('username','role','manager', 'first_name', 'last_name')\n\n def validate(self, data): \n \n parent_entity = data['manager']\n children=self.context['request'].user.get_all_children()\n \n \n # raise Exception(self.context['request'].user.role ==('FM' or 'PMG' or 'TSP'))\n if parent_entity == self.context['request'].user:\n return data\n if parent_entity in children:\n return data\n else:\n raise PermissionDenied()\n\n# class ProjectManagerSerializer(serializers.HyperlinkedModelSerializer):\n# user=UserSerializer(required=True)\n# class Meta:\n# model = ProjectManager\n# fields = ['name','user']\n \n# # def create(self, validated_data):\n# # \"\"\"\n# # Overriding the default create method of the Model serializer.\n# # :param validated_data: data containing all the details of student\n# # :return: returns a successfully created student record\n# # \"\"\"\n# # user_data = validated_data.pop('user')\n# # user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n# # projectmanager, created = ProjectManager.objects.update_or_create(user=user, parent=validated_data.pop('parent'))\n# # return projectmanager\n\n# class TechSupportSerializer(serializers.HyperlinkedModelSerializer):\n \n# class Meta:\n# model = TechSupport\n# fields = ['name']\n\n# class RegionalManagerSerializer(serializers.HyperlinkedModelSerializer):\n# user=UserSerializer(required=True)\n# class Meta:\n# model = RegionalManager\n# fields = ['name','parent','user']\n\n# def create(self, validated_data):\n# \"\"\"\n# Overriding the default create method of the Model serializer.\n# :param validated_data: data containing all the details of student\n# :return: returns a successfully created student record\n# \"\"\"\n# user_data = validated_data.pop('user')\n# user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n# regionalmanager, created = RegionalManager.objects.update_or_create(user=user,name=validated_data.pop('name'), parent=validated_data.pop('parent'))\n# return regionalmanager\n\n# def validate(self, data): \n \n# parent_entity = data['parent']\n \n# if parent_entity.user == self.context['request'].user:\n# return data\n# else:\n# raise PermissionDenied()\n\n# class SalesmanSerializer(serializers.HyperlinkedModelSerializer):\n# # parent=RegionalManagerSerializer(many=True, required=True)\n# user=UserSerializer(required=True)\n# class Meta:\n# model = Salesman\n# fields = ['name','parent','user']\n \n# def create(self, validated_data):\n# \"\"\"\n# Overriding the default create method of the Model serializer.\n# :param validated_data: data containing all the details of student\n# :return: returns a successfully created student record\n# \"\"\"\n# user_data = validated_data.pop('user')\n# user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n# salesman, created = Salesman.objects.update_or_create(user=user,name=validated_data.pop('name'), parent=validated_data.pop('parent'))\n# return salesman\n\n# def validate(self, data): \n \n# parent_entity = data['parent']\n \n# if parent_entity.user == 
self.context['request'].user:\n#             return data\n#         elif parent_entity.parent.user ==self.context['request'].user:\n#             return data\n#         else:\n#             raise PermissionDenied()\n\n# class FarmerSerializer(serializers.HyperlinkedModelSerializer):\n#     user=UserSerializer(required=True)\n#     class Meta:\n#         model = Farmer\n#         fields = ['name','parent','user']\n\n#     def create(self, validated_data):\n#         \"\"\"\n#         Overriding the default create method of the Model serializer.\n#         :param validated_data: data containing all the details of student\n#         :return: returns a successfully created student record\n#         \"\"\"\n#         user_data = validated_data.pop('user')\n#         user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n#         farmer, created = Farmer.objects.update_or_create(user=user,name=validated_data.pop('name'), parent=validated_data.pop('parent'))\n#         return farmer\n\n#     def validate(self, data): \n        \n#         parent_entity = data['parent']\n        \n#         if parent_entity.user == self.context['request'].user:\n#             return data\n#         elif parent_entity.parent.user ==self.context['request'].user:\n#             return data\n#         elif parent_entity.parent.parent.user ==self.context['request'].user:\n#             return data\n#         else:\n#             raise PermissionDenied()\n\nclass PlotSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = Plot\n        fields = ['name','rotation']\n\nclass FarmSerializer(serializers.HyperlinkedModelSerializer):\n    # owner = UserSerializer(required=True)\n    plots=PlotSerializer(many=True, required=False)\n    \n    class Meta:\n        model = Farm\n        fields = ['name','adress','plots','owner']\n    \n    def validate(self, data): \n        \n        parent_entity = data['owner']\n        children=self.context['request'].user.get_all_children()\n        \n        \n        # raise Exception(self.context['request'].user.role ==('FM' or 'PMG' or 'TSP'))\n        if parent_entity == self.context['request'].user:\n            if self.context['request'].user.role in ('FM', 'PMG', 'TSP'):\n                return data\n            else:\n                raise PermissionDenied()\n        elif parent_entity in children:\n            if parent_entity.role in ('FM', 'PMG', 'TSP'):\n                return data\n            else:\n                raise PermissionDenied()\n        else:\n            raise PermissionDenied()\n    \n\n    # elif Salesman.objects.filter(user_id=parent_entity.parent).exists():\n    #     if parent_entity.parent.user == self.context['request'].parent.user:\n    #         return data\n    #     else:\n    #         raise exceptions.NotAcceptable(detail=None, code=None)\n    # else:\n    #     raise exceptions.NotAcceptable(detail=None, code=None)\n\n","repo_name":"sebastiennoiron/testenv","sub_path":"testenv/home/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
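The membership tests in FarmSerializer.validate are deliberate; a quick illustration of why the tempting role == ('FM' or 'PMG' or 'TSP') form is wrong, since Python's or returns its first truthy operand:

>>> ('FM' or 'PMG' or 'TSP')
'FM'
>>> 'PMG' == ('FM' or 'PMG' or 'TSP')
False
>>> 'PMG' in ('FM', 'PMG', 'TSP')
True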
+{"seq_id":"71581154635","text":"import math\n\nimport facto\nfrom timeit import default_timer as timer\nimport matplotlib.pyplot as plt\n\n\ndef print_hi(name):\n coefficient = [1, 1, 1]\n fct = facto.facto.from_factoradic_to_decimal(coefficient)\n coefficients = [1, 1]\n fct2 = facto.facto_from_factoradic(coefficients)\n print(facto.Facto(15))\n\n\ndef script(facto):\n while facto.decimal < 1000000:\n deci = facto.get_integer()\n numCof = len(facto.get_coefficients())\n counter = 0\n while deci > 0:\n deci //= 10\n counter += 1\n print(str(counter) + \" : \" + str(numCof))\n facto.decimal += 1\n\n\ndef norm_fact(current, rem_counter):\n while rem_counter > 0:\n current = facto.facto.next_permutation(current)\n rem_counter -= 1\n return current\n\n\ndef snapshots_with_decider(reference):\n norm_snapshots = list()\n new_snapshots = list()\n items = reference[:]\n factorial = math.factorial(len(items))\n timers = list()\n curr_num_of_digits = 1\n for i in range(1, factorial):\n start = timer()\n facto.facto.permutation_from_coefficients(reference, facto.facto.decimal_to_coefficients(i))\n timers.append(timer() - start)\n start = timer()\n norm_fact(items, 1)\n end = timer()\n if i == 1:\n timers.append(end - start)\n else:\n timers.append(timers[-2] + (end - start))\n if len(facto.facto.decimal_to_coefficients(i)) > curr_num_of_digits:\n new_snapshots.append(timers[-4])\n norm_snapshots.append(timers[-3])\n new_snapshots.append(timers[-2])\n norm_snapshots.append(timers[-1])\n curr_num_of_digits += 1\n return new_snapshots, norm_snapshots\n\n\nif __name__ == '__main__':\n ref = [1, 2, 3]\n start_num = 3\n max_num = 10\n snapshots = list()\n while start_num <= max_num:\n new, norm = snapshots_with_decider(ref)\n snapshots.append([new, norm])\n start_num += 1\n ref.append(start_num)\n print(start_num)\n count = 3\n for i in snapshots:\n fig, ax = plt.subplots()\n y = list()\n for j in range(3, count+1):\n y.append(math.factorial(j)-1)\n y.append(math.factorial(j))\n ax.scatter(y, i[0], label=(str(count) + \" NEW__DIGIT_INCREASE\"))\n ax.scatter(y, i[1], label=(str(count) + \" OLD__DIGIT_INCREASE\"))\n ax.legend()\n plt.savefig(str(count) + \"OLD&NEW__DIGIT_INCREASE.png\")\n count += 1\n plt.show()\n\n# fct1 = facto.Facto(0)\n# while perm != perm2:\n# perm = facto.facto.next_permutation(perm)\n# fct1.decimal += 1\n# print(facto.facto.permutation_from_coefficients(reference, fct1.get_coefficients()))\n# print(perm)\n\n\n# print_hi(facto.next_permutation())\n","repo_name":"Sinua/facto.py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"25015677226","text":"from json import dumps\nfrom bookstation.models.book_sys import Book\nfrom bookstation.models.user_sys import Notification_history\nfrom bookstation.models.user_sys import Notification\nfrom bookstation import app, request, db\nfrom bookstation.models.user_sys import User\nfrom bookstation.utils.auth_util import get_user\nfrom sqlalchemy import desc\n\n\"\"\"\nFunction for users to check number of new notifications\nArgs:\n\ttoken (string): token of the user\nReturns:\n\t- to_read (int): book id\n\t\n\"\"\"\n@app.route(\"/notification/checknew\", methods=[\"GET\"])\ndef checknewnotif():\n\ttoken = request.args.get('token')\n\tuser = get_user(token)\n\tnotifications = Notification.query.filter_by(user_id=user.user_id).all()\n\tnotification_history = Notification_history.query.filter_by(user_id=user.user_id).first()\n\tif notification_history == None:\n\t\treturn dumps({ 'to_read' : len(notifications)})\n\telse:\n\t\tnotification_history.last_read\n\t\treturn dumps({ 'to_read' : len(notifications)-notification_history.last_read})\n\t\t\n\"\"\"\nFunction for users to get all notification history\nArgs:\n\ttoken (string): token of the user\nReturns:\n\t- to_read (int): book id\n\"\"\"\n@app.route(\"/notification/getall\", methods=[\"GET\"])\ndef getallnotif():\n\n\ttoken = request.args.get('token')\n\tuser = get_user(token)\n\tall_notifications = []\n\n\tnotifications = Notification.query.filter_by(user_id = user.user_id).order_by(desc(Notification.time)).all()\n\tfor notification in notifications:\n\t\tsender = User.query.get(notification.sender_id)\n\t\tnotif = {\n\t\t\t'sender_name': sender.username,\n\t\t\t'sender_id': notification.sender_id,\n\t\t\t'type': notification.type,\n\t\t\t'time': str(notification.time)\n\t\t}\n\t\tif notification.type_id != -1 :\n\t\t\tnotif['book_id'] = notification.type_id\n\t\t\tnotif['book_name'] = Book.query.get(notification.type_id).title\n\t\tall_notifications.append(notif)\n\tprint('my token'+str(token))\n\tprint('my id'+str(user.user_id))\n\thistory = Notification_history.query.filter_by(user_id=user.user_id).first()\n\tprint(history)\n\thistory.last_read = len(all_notifications)\n\tdb.session.add(history)\n\tdb.session.commit()\n\n\treturn dumps({'notifications' : all_notifications})","repo_name":"Langley999/capstone-project-3900-t14b-bigmac","sub_path":"backend/bookstation/controllers/notification/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"15360774961","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\ndef pytest_addoption(parser):\n parser.addoption('--language', action='store', default='en',\n help=\"Choose language: ru or other\")\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n lang = request.config.getoption(\"language\")\n print(\"\\nstart chrome browser for test..\")\n options = Options()\n #options.add_argument('--headless') # если раскоментировать эти две строчки, \n #options.add_argument('--start-maximized') # тесты будут запускаться в фоновом режиме\n options.add_experimental_option('prefs', {'intl.accept_languages': lang})\n browser = webdriver.Chrome(options=options) \n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n\n ","repo_name":"Atatanoff/page_object","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"9766416929","text":"import os\n\nbase_address = 0x80400000\nstep = 0x20000\nlinker = \"src/linker.ld\"\n\napp_id = 0\napps = os.listdir(\"build/app\")\napps.sort()\nchapter = os.getenv(\"CHAPTER\")\n\n# we use `Clink-args=-Ttext=%x` to set the base address of the app\n# so do not need to change the linker.ld\nfor app in apps:\n app = app[: app.find(\".\")]\n os.system(\n \"cargo rustc --bin %s --release -- -Clink-args=-Ttext=%x\"\n % (app, base_address + step * app_id)\n )\n print(\n \"[build.py] application %s start with address %s\"\n % (app, hex(base_address + step * app_id))\n )\n if chapter == \"3\" or int(chapter) == 3:\n app_id = app_id + 1\n","repo_name":"CelestialMelody/rcore-lab","sub_path":"user/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"}
+{"seq_id":"74133806474","text":"\"\"\"\nA salt cloud provider that lets you use virtualbox on your machine\nand act as a cloud.\n\nFor now this will only clone existing VMs. It's best to create a template\nfrom which we will clone.\n\nFollowed\nhttps://docs.saltstack.com/en/latest/topics/cloud/cloud.html#non-libcloud-based-modules\nto create this.\n\nDicts provided by salt:\n __opts__ : contains the options used to run Salt Cloud,\n as well as a set of configuration and environment variables\n\"\"\"\n\n# Import python libs\nimport logging\n\n# Import salt libs\nimport salt.config as config\nimport salt.utils.cloud\n\nlog = logging.getLogger(__name__)\n\n# Import virtualbox libs\nHAS_LIBS = False\ntry:\n # This code assumes vboxapi.py from VirtualBox distribution\n # being in PYTHONPATH, or installed system-wide\n from vboxapi import VirtualBoxManager\n\n HAS_LIBS = True\n\nexcept ImportError:\n VirtualBoxManager = None\n log.error(\"Couldn't import VirtualBox API\")\n\n\"\"\"\nThe name salt will identify the lib by\n\"\"\"\n__virtualname__ = 'virtualbox'\n_virtualboxManager = None\n\n\ndef __virtual__():\n \"\"\"\n This function determines whether or not\n to make this cloud module available upon execution.\n Most often, it uses get_configured_provider() to determine\n if the necessary configuration has been set up.\n It may also check for necessary imports decide whether to load the module.\n In most cases, it will return a True or False value.\n If the name of the driver used does not match the filename,\n then that name should be returned instead of True.\n\n @return True|False|str\n \"\"\"\n\n if not HAS_LIBS:\n return False\n\n if get_configured_provider() is False:\n return False\n\n # If the name of the driver used does not match the filename,\n # then that name should be returned instead of True.\n # return __virtualname__\n return True\n\n\ndef get_configured_provider():\n \"\"\"\n Return the first configured instance.\n \"\"\"\n configured = config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n () # keys we need from the provider configuration\n )\n log.debug(\"First virtualbox configuration %s\" % configured)\n return configured\n\n\ndef create(vm_info):\n \"\"\"\n Creates a virtual machine from the given VM information.\n This is what is used to request a virtual machine to be created by the\n cloud provider, wait for it to become available,\n and then (optionally) log in and install Salt on it.\n\n Fires:\n \"starting create\" : This event is tagged salt/cloud//creating.\n The payload contains the names of the VM, profile and provider.\n\n @param vm_info {dict}\n {\n name: \n profile: \n provider: \n clone_from: \n }\n @return dict of resulting vm. 
!!!Passwords can and should be included!!!\n \"\"\"\n log.debug(\"Creating virtualbox with %s\" % vm_info)\n try:\n # Check for required profile parameters before sending any API calls.\n # TODO should this be a call to config.is_provider_configured ?\n if vm_info['profile'] and config.is_profile_configured(\n __opts__,\n __active_provider_name__ or 'virtualbox',\n vm_info['profile']\n ) is False:\n return False\n except AttributeError:\n pass\n\n # For now we can only clone\n if 'clone_from' not in vm_info:\n log.error('\"clone_from\" not in profile configuration!')\n return False\n\n salt.utils.cloud.fire_event(\n 'event',\n 'starting create',\n 'salt/cloud/{0}/creating'.format(vm_info['name']),\n {\n 'name': vm_info['name'],\n 'profile': vm_info['profile'],\n 'provider': vm_info['provider'],\n },\n transport=__opts__['transport']\n )\n\n # TODO Calculate kwargs with parameters required by virtualbox\n # to create the virtual machine.\n request_kwargs = {\n 'name': vm_info['name'],\n 'clone_from': vm_info['clone_from']\n }\n\n salt.utils.cloud.fire_event(\n 'event',\n 'requesting instance',\n 'salt/cloud/{0}/requesting'.format(vm_info['name']),\n request_kwargs,\n transport=__opts__['transport']\n )\n # TODO request a new VM!\n vm_result = vb_clone_vm(**request_kwargs)\n\n # TODO Prepare deployment of salt on the vm\n # Any private data, including passwords and keys (including public keys)\n # should be stripped from the deploy kwargs before the event is fired.\n deploy_kwargs = {\n }\n\n salt.utils.cloud.fire_event(\n 'event',\n 'deploying salt',\n 'salt/cloud/{0}/deploying'.format(vm_info['name']),\n deploy_kwargs,\n transport=__opts__['transport']\n )\n\n deploy_kwargs.update({\n # TODO Add private data\n })\n\n # TODO wait for target machine to become available\n # TODO deploy!\n # Do we have to call this?\n salt.utils.cloud.deploy_script(None, **deploy_kwargs)\n\n salt.utils.cloud.fire_event(\n 'event',\n 'created machine',\n 'salt/cloud/{0}/created'.format(vm_info['name']),\n vm_result,\n transport=__opts__['transport']\n )\n\n # Passwords should be included in this object!!\n return vm_result\n\n\n# -----------------------------\n# Virtualbox methods\n# -----------------------------\n\ndef vb_get_manager():\n # This code initializes VirtualBox manager with default style\n # and parameters\n global _virtualboxManager\n if _virtualboxManager is None:\n _virtualboxManager = VirtualBoxManager(None, None)\n vbox = _virtualboxManager.vbox\n return vbox\n\n\ndef vb_create_machine(name=None):\n vbox = vb_get_manager()\n log.info(\"Create virtualbox machine %s \" % (name,))\n groups = None\n os_type_id = \"Other\"\n new_machine = vbox.createMachine(\n None, # Settings file\n name,\n groups,\n os_type_id,\n None # flags\n )\n vbox.registerMachine(new_machine)\n log.info(\"Finished creating %s\" % name)\n\n\ndef vb_clone_vm(\n name=None,\n clone_from=None,\n timeout=10000):\n \"\"\"\n Tells virtualbox to create a VM\n\n @return dict of resulting VM\n \"\"\"\n vbox = vb_get_manager()\n log.info(\"Clone virtualbox machine %s from %s\" % (name, clone_from))\n\n source_machine = vbox.findMachine(clone_from)\n\n groups = None\n osTypeId = \"Other\"\n new_machine = vbox.createMachine(\n None, # Settings file\n name,\n groups,\n osTypeId,\n None # flags\n )\n\n progress = source_machine.cloneTo(\n new_machine,\n 0, # CloneMode\n None # CloneOptions : None = Full?\n )\n\n progress.waitForCompletion(timeout)\n log.info(\"Finished cloning %s from %s\" % (name, clone_from))\n\n 
vbox.registerMachine(new_machine)\n\n # TODO return a struct/class that describes a virtual machine\n\n\ndef vb_start_vm(**kwargs):\n \"\"\"\n Tells Virtualbox to start up a VM.\n Blocking function!\n\n @return dict of started VM, contains IP addresses and what not\n \"\"\"\n pass\n\n\ndef vb_destroy_machine(name=None, timeout=10000):\n \"\"\"\n\n @param timeout int timeout in milliseconds\n \"\"\"\n vbox = vb_get_manager()\n log.info(\"Destroying machine %s\" % name)\n machine = vbox.findMachine(name)\n files = machine.unregister(2)\n progress = machine.deleteConfig(files)\n progress.waitForCompletion(timeout)\n log.info(\"Finished destroying machine %s\" % name)\n","repo_name":"LoveIsGrief/saltcloud-virtualbox-provider","sub_path":"virtualbox.py","file_name":"virtualbox.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
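A minimal sketch of the vm_info dict that create() above expects, inferred only from the keys the function reads; all values are placeholders:

vm_info = {
    "name": "my-cloned-vm",
    "profile": "virtualbox-profile",      # must be configured, see is_profile_configured()
    "provider": "my-virtualbox-provider",
    "clone_from": "template-vm",          # name of an existing VM to clone
}
# create(vm_info) fires salt/cloud/my-cloned-vm/creating and ends up calling
# vb_clone_vm(name="my-cloned-vm", clone_from="template-vm")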
+{"seq_id":"39335230322","text":"import torch\nfrom torch._six import inf\n\nfrom apex.multi_tensor_apply import multi_tensor_applier\nimport amp_C\n\nfrom codegeex.megatron import mpu\nfrom codegeex.megatron.model.module import param_is_not_shared\nfrom codegeex.megatron.mpu.layers import param_is_not_tensor_parallel_duplicate\n\n\ndef clip_grad_norm_fp32(parameters, max_norm, norm_type=2):\n \"\"\"Clips gradient norm of an iterable of parameters whose gradients\n are in fp32.\n\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and\n added functionality to handle model parallel parameters. Note that\n the gradients are modified in place.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n\n # Filter parameters based on:\n # - grad should not be none\n # - parameter should not be shared\n # - should not be a replica due to tensor model parallelism\n grads = []\n grads_for_norm = []\n for param in parameters:\n grad_not_none = param.grad is not None\n is_not_shared = param_is_not_shared(param)\n is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)\n grad = param.grad.detach()\n if grad_not_none:\n # Make sure the grads are in fp32\n assert param.grad.type() == \"torch.cuda.FloatTensor\"\n grads.append(grad)\n if grad_not_none and is_not_shared and is_not_tp_duplicate:\n grads_for_norm.append(grad)\n\n # Norm parameters.\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n total_norm = 0.0\n\n # Calculate norm.\n if norm_type == inf:\n total_norm = max(grad.abs().max() for grad in grads_for_norm)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n # Take max across all model-parallel GPUs.\n torch.distributed.all_reduce(\n total_norm_cuda,\n op=torch.distributed.ReduceOp.MAX,\n group=mpu.get_model_parallel_group(),\n )\n total_norm = total_norm_cuda[0].item()\n\n else:\n if norm_type == 2.0:\n dummy_overflow_buf = torch.cuda.IntTensor([0])\n # Use apex's multi-tensor applier for efficiency reasons.\n # Multi-tensor applier takes a function and a list of list\n # and performs the operation on that list all in one kernel.\n grad_norm, _ = multi_tensor_applier(\n amp_C.multi_tensor_l2norm,\n dummy_overflow_buf,\n [grads_for_norm],\n False, # no per-parameter norm\n )\n # Since we will be summing across data parallel groups,\n # we need the pow(norm-type).\n total_norm = grad_norm ** norm_type\n\n else:\n for grad in grads_for_norm:\n grad_norm = torch.norm(grad, norm_type)\n total_norm += grad_norm ** norm_type\n\n # Sum across all model-parallel GPUs.\n torch.distributed.all_reduce(\n total_norm,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group(),\n )\n total_norm = total_norm.item() ** (1.0 / norm_type)\n\n # Scale.\n clip_coeff = max_norm / (total_norm + 1.0e-6)\n if clip_coeff < 1.0:\n dummy_overflow_buf = torch.cuda.IntTensor([0])\n multi_tensor_applier(\n amp_C.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff\n )\n\n return total_norm\n\n\ndef count_zeros_fp32(parameters):\n\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n\n # Filter parameters based on:\n # - grad should not be none\n # - 
parameter should not be shared\n # - should not be a replica due to tensor model parallelism\n total_num_zeros = 0.0\n for param in parameters:\n grad_not_none = param.grad is not None\n is_not_shared = param_is_not_shared(param)\n is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)\n if grad_not_none and is_not_shared and is_not_tp_duplicate:\n grad = param.grad.detach()\n num_zeros = grad.numel() - torch.count_nonzero(grad)\n total_num_zeros = num_zeros + total_num_zeros\n\n # Sum across all model-parallel GPUs.\n torch.distributed.all_reduce(\n total_num_zeros,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group(),\n )\n total_num_zeros = total_num_zeros.item()\n\n return total_num_zeros\n","repo_name":"THUDM/CodeGeeX","sub_path":"codegeex/megatron/optimizer/clip_grads.py","file_name":"clip_grads.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":7272,"dataset":"github-code","pt":"28"}
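A minimal usage sketch for the two helpers above; model is a placeholder, and the gradients must already be fp32 CUDA tensors, per the assertion inside clip_grad_norm_fp32:

params = [p for p in model.parameters() if p.grad is not None]
total_norm = clip_grad_norm_fp32(params, max_norm=1.0, norm_type=2)
num_zero_grads = count_zeros_fp32(params)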
+{"seq_id":"41430240150","text":"import socket\nimport threading\nimport Queue\nimport time\nimport ip\nimport inspect\n\nclass serverReadThread(threading.Thread):\n\n def __init__(self, anotherPeerClientSocket, serverQueue, connectPoint, connectPointList, fonctionList):\n threading.Thread.__init__(self)\n self.anotherPeerClientSocket = anotherPeerClientSocket\n self.serverQueue = serverQueue\n self.connectPoint = connectPoint\n self.connectPointList = connectPointList\n self.fonctionList = fonctionList\n\n def parser(self, data):\n\n if \" \" in data:\n indexRouter = data.index(\" \")\n command = data[:indexRouter]\n arguments = data[indexRouter+1:]\n else:\n command = data\n arguments = None\n\n if command == \"HELLO\\n\":\n lock1.acquire()\n self.serverQueue.put(\"SALUT N\\n\")\n lock1.release()\n\n elif command == \"CLOSE\\n\":\n lock1.acquire()\n self.serverQueue.put(\"BUBYE\\n\")\n lock1.release()\n\n elif command == \"REGME\":\n indexRouter = arguments.index(\":\")\n ipNo = arguments[:indexRouter]\n portNo = data[indexRouter+1:]\n\n lock2.acquire()\n self.connectPoint = [ipNo, portNo, \"W\"]\n\n i = 0\n registered = False\n for point in self.connectPointList:\n if point[i][0] == self.connectPoint[0] and point[i][1] == self.connectPoint[1]:\n lock1.acquire()\n self.serverQueue.put(\"REGOK \" + time.time())\n lock1.release()\n registered = True\n self.connectPoint = []\n i += 1\n\n lock2.release()\n\n if registered == False:\n lock1.acquire()\n self.serverQueue.put(\"REGWA\\n\")\n lock1.release()\n\n elif command == \"GETNL\":\n\n if self.connectPoint[2] != \"S\":\n lock1.acquire()\n self.serverQueue.put(\"REGER\\n\")\n lock1.release()\n else:\n lock1.acquire()\n self.serverQueue.put(\"NLIST BEGIN\\n\")\n\n if arguments == None:\n i = 0\n lock3.acquire()\n for point in self.connectPointList:\n self.serverQueue.put(point[i][0] + \":\" + point[i][1] + \":\" + point[i][3] + \":P\")\n i += 1\n lock3.release()\n else:\n i = 0\n lock3.acquire()\n for point in self.connectPointList:\n if i < arguments:\n self.serverQueue.put(point[i][0] + \":\" + point[i][1] + \":\" + point[i][3] + \":P\")\n i += 1\n else:\n break\n lock3.release()\n self.serverQueue.put(\"NLIST END\\n\")\n lock1.release()\n\n elif command == \"FUNRQ\":\n if self.connectPoint[2] != \"S\":\n lock1.acquire()\n self.serverQueue.put(\"REGER\\n\")\n lock1.release()\n else:\n noFunction = True\n for i in fonctionList:\n if arguments in fonctionList[i][0]:\n if fonctionList[i][1] == None:\n self.serverQueue.put(\"FUNYS \" + arguments)\n noFunction = False\n break\n else:\n self.serverQueue.put(\"FUNYS \" + arguments + \":\" + arguments[i][1])\n noFunction = False\n break\n\n if noFunction:\n self.serverQueue.put(\"FUNNO \" + arguments)\n\n\n elif command == \"EXERQ\":\n\n if self.connectPoint[2] != \"S\":\n lock1.acquire()\n self.serverQueue.put(\"REGER\\n\")\n lock1.release()\n else:\n indexRouter = arguments.index(\":\")\n fonctionName = arguments[:indexRouter]\n arguments = data[indexRouter+1:]\n\n indexRouter = arguments.index(\":\")\n parameters = arguments[:indexRouter]\n arguments = data[indexRouter+1:]\n\n indexRouter = arguments.index(\":\")\n num = arguments[:indexRouter]\n arguments = data[indexRouter+1:]\n\n indexRouter = arguments.index(\":\")\n md5sum = arguments[:indexRouter]\n arguments = data[indexRouter+1:]\n\n indexRouter = arguments.index(\":\")\n udata = arguments[:indexRouter]\n\n\n noFunction = True\n for i in fonctionList:\n if arguments in fonctionList[i][0]:\n self.serverQueue.put(\"EXEOK \" + md5sum + 
\":\" + num)\n\n if noFunction:\n self.serverQueue.put(\"EXENF \" + md5sum + \":\" + num)\n\n elif command == \"PATCH\":\n\n if self.connectPoint[2] != \"S\":\n lock1.acquire()\n self.serverQueue.put(\"REGER\\n\")\n lock1.release()\n else:\n indexRouter = arguments.index(\":\")\n md5sum = arguments[:indexRouter]\n arguments = data[indexRouter+1:]\n\n indexRouter = arguments.index(\":\")\n num = arguments[:indexRouter]\n arguments = data[indexRouter+1:]\n\n indexRouter = arguments.index(\":\")\n pdata = arguments[:indexRouter]\n\n\n\n\n def run(self):\n while True:\n data = self.anotherPeerClientSocket.recv()\n self.parser(data)\n\n\n\nclass serverWriteThread(threading.Thread):\n\n def __init__(self, anotherPeerClientSocket, serverQueue):\n threading.Thread.__init__(self)\n self.anotherPeerClientSocket = anotherPeerClientSocket\n self.serverQueue = serverQueue\n\n def run(self):\n\n while True:\n lock1.acquire()\n if not serverQueue.empty():\n response = serverQueue.get()\n lock1.release()\n self.anotherPeerClientSocket.send(response)\n else:\n lock1.release()\n\n\nclass clientReadThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n pass\n\n\n\nclass clientWriteThread(threading.Thread):\n\n def __init__(self, connectPoint, connectPointList, peerClientSocket, serverQueue):\n threading.Thread.__init__(self)\n self.connectPoint = connectPoint\n self.connectPointList = connectPointList\n self.peerClientSocket = peerClientSocket\n self.serverQueue = serverQueue\n\n def run(self):\n\n if not self.connectPoint:\n anotherPeerServerSocketHost = self.connectPoint[1]\n anotherPeerServerSocketPort = self.connectPoint[0]\n\n try:\n self.peerClientSocket.connect(anotherPeerServerSocketHost, anotherPeerServerSocketPort)\n self.connectPoint[2] = \"S\"\n self.connectPoint[3] = time.time()\n lock3.acquire()\n self.connectPointList.append(self.connectPoint)\n lock3.release()\n except:\n lock1.acquire()\n self.serverQueue.put(\"REGER\\n\")\n lock1.release()\n\n\nfonctionList = [ (\"GrayScale\", None), \\\n (\"Binarize\", {\"name\":\"treshold\", \"min\":0, \"max\":255}), \\\n (\"SobelFilter\", {\"name\":\"treshold\", \"min\":0, \"max\":255}) ]\n\nip.main()\n\nconnectPointList = []\n# connectPointList korumak icin\nlock3 = threading.Lock()\n\npeerServerSocket = socket.socket()\n\npeerServerSocketHost = socket.gethostname()\npeerServerSocketPort = 6060\n\npeerServerSocket.bind((peerServerSocketHost, peerServerSocketPort))\npeerServerSocket.listen(5)\n\n\nthreads = []\n\nwhile True:\n serverQueue = Queue.Queue()\n # serverQueue korumak icin\n lock1 = threading.Lock()\n\n connectPoint = []\n # serverQueue korumak icin\n lock2 = threading.Lock()\n\n peerClientSocket = socket.socket()\n anotherPeerClientSocket, anotherPeerClientAddr = peerServerSocket.accept()\n\n lock3.acquire()\n srThread = serverReadThread(anotherPeerClientSocket, serverQueue, connectPoint, connectPointList, fonctionList)\n lock3.release()\n\n swThread = serverWriteThread(anotherPeerClientSocket, serverQueue)\n\n crThread = clientReadThread()\n\n lock3.acquire()\n cwThread = clientWriteThread(connectPoint, connectPointList, peerClientSocket, serverQueue)\n lock3.acquire()\n\n threads.append(srThread)\n threads.append(swThread)\n threads.append(crThread)\n threads.append(cwThread)\n\n srThread.start()\n swThread.start()\n crThread.start()\n cwThread.start()\n\nfor i in threads:\n 
i.join()","repo_name":"humanitarianist/dagitik","sub_path":"proje/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":8983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"34012691588","text":"import os\nimport json\n\nfrom pymwp.file_io import default_file_out, save_result, load_result, loc\nfrom pymwp import Relation, Choices, Result, Bound\nfrom pymwp.result import FuncResult\n\n\ndef test_file_out_name_wo_path():\n \"\"\"Generating output filename from just a filename without path returns\n result with output directory path.\"\"\"\n input_file = \"my_c_file.c\"\n out_file = default_file_out(input_file)\n\n assert out_file == \"output/my_c_file.json\"\n\n\ndef test_file_out_name_with_path():\n \"\"\"Generating output filename removes original filepath.\"\"\"\n input_file = \"my_dir/of/files/example.c\"\n out_file = default_file_out(input_file)\n\n assert out_file == \"output/example.json\"\n\n\n# noinspection PyUnresolvedReferences\ndef test_save_relation(mocker):\n \"\"\"Method generates directory when it does not exist then saves.\"\"\"\n # mock all built-ins\n mocker.patch('os.path.exists', return_value=False)\n mocker.patch('os.makedirs')\n mocker.patch('builtins.open')\n mocker.patch('json.dump')\n\n result = Result()\n filename = 'fake_path/deep/path/output.txt'\n result.add_relation(\n FuncResult('foo', False, relation=Relation(), choices=Choices()))\n save_result(filename, result)\n\n # it creates directory path when dir/s do not exist\n os.makedirs.assert_called_once_with('fake_path/deep/path')\n # it saves json\n json.dump.assert_called_once()\n\n\ndef test_load_relation(mocker):\n \"\"\"Method generates expected object instance.\"\"\"\n # mock built-ins\n mocker.patch('json.load', return_value={\n \"start_time\": 1682637184575679000,\n \"end_time\": 1682637184576806000,\n \"program\": {\n \"n_lines\": 8,\n \"program_path\": \"c_files/example.c\"\n },\n \"relations\": [{\n \"name\": \"foo\",\n \"variables\": [\"x\", \"y\"],\n \"relation\":\n {\"matrix\": [\n [[{\"scalar\": \"m\", \"deltas\": [(0, 0)]}],\n [{\"scalar\": \"o\", \"deltas\": []}]],\n [[{\"scalar\": \"o\", \"deltas\": []}],\n [{\"scalar\": \"m\", \"deltas\": []}]]]},\n \"choices\": [[[0, 1], [0]]],\n \"bound\": {\"x\": \"x;;\", \"y\": \"y;;x\"},\n \"infinity\": False\n }]\n })\n mocker.patch('builtins.open')\n\n # load the relation\n result = load_result(\"whatever.txt\")\n assert 'foo' in result.relations\n\n foo_res = result.get_func('foo')\n assert isinstance(foo_res.relation, Relation)\n assert foo_res.relation.variables == [\"x\", \"y\"]\n\n assert foo_res.choices.valid == [[[0, 1], [0]]]\n assert foo_res.choices.first == (0, 0)\n assert not foo_res.infinite\n\n # # now check that composed relation matches expectation\n first_mono = foo_res.relation.matrix[0][0].list[0]\n assert first_mono.scalar == \"m\"\n assert first_mono.deltas == [(0, 0)]\n\n # restores bound details correctly\n assert foo_res.bound.bound_dict['x'].x.vars == [\"x\"]\n assert foo_res.bound.bound_dict['y'].x.vars == [\"y\"]\n assert foo_res.bound.bound_dict['y'].z.vars == [\"x\"]\n\n\ndef test_load_relation_infty(mocker):\n \"\"\"Method load infinited result correctly.\"\"\"\n mocker.patch('json.load', return_value={\n \"start_time\": 1682639567996906000,\n \"end_time\": 1682639568022015000,\n \"program\": {\n \"n_lines\": 6,\n \"program_path\": \"example.c\"\n },\n \"relations\": [\n {\n \"name\": \"boohoo\",\n \"infinity\": True,\n \"relation\": None\n }\n ]\n })\n mocker.patch('builtins.open')\n\n # load the relation\n result = load_result(\"whatever.txt\")\n assert 'boohoo' in result.relations\n bh = result.get_func('boohoo')\n assert bh is not None\n assert bh.relation is None\n assert 
bh.infinite\n\n\ndef test_read_loc(mocker):\n \"\"\"Returns expected number of lines\"\"\"\n mocker.patch(\n 'builtins.open',\n new_callable=mocker.mock_open,\n read_data=\"1\\n2\\n3\\n4\")\n result = loc(\"some_file.c\")\n\n assert result == 4\n","repo_name":"statycc/pymwp","sub_path":"tests/test_file_io.py","file_name":"test_file_io.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"}
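The first two tests above pin down default_file_out's contract; restated as a compact check (requires pymwp installed):

from pymwp.file_io import default_file_out

assert default_file_out("my_c_file.c") == "output/my_c_file.json"
assert default_file_out("my_dir/of/files/example.c") == "output/example.json"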
+{"seq_id":"35062079418","text":"from functools import lru_cache\n\nclass Solution:\n def colorTheGrid(self, m: int, n: int) -> int:\n MOD = 10 ** 9 + 7\n\n @lru_cache(None)\n def color(m, n, r, c, cur, prev):\n if r == m:\n return color(m, n, 0, c + 1, 0, cur)\n if c == n:\n return 1\n # if r == 0 and dp[c][prev] != -1:\n # return dp[c][prev]\n cnt = 0\n #check if color is above\n up = (cur >> ((r-1)*2)) & 3 if r != 0 else 0\n #check a color is at the left of\n left = prev >> (r * 2) & 3\n for clr in range(1, 4):\n if clr != up and clr != left:\n cnt = (cnt + color(m, n, r + 1, c, cur + (clr << (r * 2)), prev)) % MOD\n # if r == 0:\n # dp[c][prev] = cnt\n return cnt\n\n # dp = [[-1] * 1025 for _ in range(1002)]\n return color(m, n, 0, 0, 0, 0)\n\n\nsol = Solution()\nprint(sol.colorTheGrid(5, 999))\nprint(sol.colorTheGrid(1,2))#6\n","repo_name":"joestalker1/leetcode","sub_path":"src/main/scala/PaintingGridWithThreeDifferentColors.py","file_name":"PaintingGridWithThreeDifferentColors.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"73831044233","text":"import cv2\r\nimport os.path as osp\r\nimport numpy as np\r\n\r\nfile_path = osp.dirname(osp.abspath(__file__))\r\nfile_name = 'sample.jpg'\r\nimg = cv2.imread(osp.join(file_path, file_name))\r\n# img = cv2.resize(img, (0, 0), None, 0.5, 0.5)\r\ncopied1 = img.copy()\r\ncopied2 = img.copy()\r\ncopied3 = img.copy()\r\nh, w, _ = img.shape\r\n\r\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nedges = cv2.Canny(img_gray, 100, 200)\r\n\r\n# Hough Line Transform\r\nlines = cv2.HoughLines(edges, 1, np.pi/180, 70)\r\nfor line in lines:\r\n r, theta = line[0]\r\n tx, ty = np.cos(theta), np.sin(theta)\r\n x0, y0 = tx*r, ty*r\r\n cv2.circle(copied1, (abs(x0), abs(y0)), 3, (0, 0, 255), -1)\r\n\r\n x1, y1 = int(x0 - w*ty), int(y0 + h*tx)\r\n x2, y2 = int(x0 + w*ty), int(y0 - h*tx)\r\n\r\n cv2.line(copied1, (x1, y1), (x2, y2), (0, 255, 0), 2)\r\n\r\n# Probabilistic Hough Line Transform\r\nlines = cv2.HoughLinesP(edges, 1, np.pi/180, 5, None, 20, 2)\r\nfor line in lines:\r\n x1, y1, x2, y2 = line[0]\r\n cv2.line(copied2, (x1, y1), (x2, y2), (0, 255, 0), 2)\r\n\r\n# Hough Circle Transform\r\nblurred = cv2.GaussianBlur(img_gray, (3, 3), 0)\r\n\r\ncircles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 2, 30, None, 200)\r\nif circles is not None:\r\n circles = np.uint16(np.around(circles))\r\n for x, y, r in circles[0, :]:\r\n cv2.circle(copied3, (x, y), r, (255, 0, 0), 2)\r\n cv2.circle(copied3, (x, y), 4, (0, 0, 255), -1)\r\n\r\nmerged1 = np.hstack((img, copied1))\r\nmerged2 = np.hstack((copied2, copied3))\r\nmerged = np.vstack((merged1, merged2))\r\ncv2.imshow('Hough', merged)\r\ncv2.imshow('Edge', edges)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()","repo_name":"bagineer/opencv_tutorial","sub_path":"ch_23/hough.py","file_name":"hough.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12089511695","text":"import scanpy as sc\nfrom scvi.inference import TotalPosterior\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors, KNeighborsRegressor\nimport scipy\nimport torch\nfrom tqdm.auto import tqdm\nimport statsmodels.api as sm\nimport phenograph\nfrom sklearn.metrics import (\n adjusted_rand_score,\n adjusted_mutual_info_score,\n fowlkes_mallows_score,\n silhouette_score,\n silhouette_samples,\n)\nimport hotspot\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom statsmodels.stats.multitest import multipletests\n\n\ndef set_seed(seed):\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n\n\ndef rank_genes_groups_totalVI(\n adata: sc.AnnData,\n scvi_posterior: TotalPosterior,\n n_samples: int = None,\n M_permutation: int = None,\n n_features: int = 25,\n protein_only: bool = True,\n gene_only: bool = False,\n label_name: str = \"louvain_scvi\",\n) -> pd.DataFrame:\n \"\"\"\n Rank genes for characterizing groups.\n Computes Bayes factor for each cluster against the others to test for differential expression.\n See Nature article (https://rdcu.be/bdHYQ)\n\n :param adata: sc.AnnData object non-normalized\n :param scvi_posterior:\n :param n_samples:\n :param M_permutation:\n :param n_genes:\n :param label_name: The groups tested are taken from adata.obs[label_name] which can be computed\n using clustering like Louvain (Ex: sc.tl.louvain(adata, key_added=label_name) )\n :return: Summary of Bayes factor per gene, per cluster\n \"\"\"\n\n # Call scvi function\n per_cluster_de, cluster_id = scvi_posterior.one_vs_all_degenes(\n cell_labels=np.asarray(adata.obs[label_name].values).astype(int).ravel(),\n min_cells=1,\n n_samples=n_samples,\n M_permutation=M_permutation,\n )\n\n # convert to ScanPy format -- this is just about feeding scvi results into a format readable by ScanPy\n markers = []\n scores = []\n names = []\n for i, x in enumerate(per_cluster_de):\n subset_de = x[:n_features]\n markers.append(subset_de)\n scores.append(tuple(subset_de[\"bayes_factor\"].values))\n names.append(tuple(subset_de.index.values))\n\n markers = pd.concat(markers)\n dtypes_scores = [(str(i), \" 2:\n raise ValueError(\"Should be only two clusters for this metric\")\n frequency = np.mean(hist_data == 1)\n if frequency == 0 or frequency == 1:\n return 0\n return -frequency * np.log(frequency) - (1 - frequency) * np.log(1 - frequency)\n\n def neg_kl(hist_data, global_freq):\n n_batches = len(np.unique(hist_data))\n if n_batches > 2:\n raise ValueError(\"Should be only two clusters for this metric\")\n frequency = np.mean(hist_data == 1)\n if frequency == 0 or frequency == 1:\n return 0\n return -(\n frequency * np.log(frequency / global_freq)\n + (1 - frequency) * np.log((1 - frequency) / (1 - global_freq))\n )\n\n n_neighbors = min(n_neighbors, len(latent_space) - 1)\n nne = NearestNeighbors(n_neighbors=1 + n_neighbors, n_jobs=8)\n nne.fit(latent_space)\n kmatrix = nne.kneighbors_graph(latent_space) - scipy.sparse.identity(\n latent_space.shape[0]\n )\n\n global_freq = np.mean(batches)\n print(global_freq)\n score = 0\n for t in range(n_pools):\n indices = np.random.choice(\n np.arange(latent_space.shape[0]), size=n_samples_per_pool\n )\n score += np.mean(\n [\n neg_kl(\n batches[ # the batches of cell i's neighbors\n kmatrix[indices].nonzero()[\n 1\n ][ # the neighbors of cell i (columns in row i)\n 
kmatrix[indices].nonzero()[0] == i # the row of cell i\n ]\n ],\n global_freq,\n )\n for i in range(n_samples_per_pool)\n ]\n )\n return score / float(n_pools)\n\n\ndef clustering_metric_silhoutte(\n adata1,\n adata2,\n adata,\n batchid,\n metric=\"euclidean\",\n k=30,\n use_rep=\"X_pca\",\n resolution=0.8,\n n_clusters=25,\n):\n\n sc.pp.neighbors(adata1, n_neighbors=k, use_rep=use_rep, metric=metric)\n sc.pp.neighbors(adata2, n_neighbors=k, use_rep=use_rep, metric=metric)\n adata_joint_1 = adata[batchid == 0].copy()\n adata_joint_2 = adata[batchid == 1].copy()\n sc.pp.neighbors(adata_joint_1, n_neighbors=k, use_rep=use_rep, metric=metric)\n sc.pp.neighbors(adata_joint_2, n_neighbors=k, use_rep=use_rep, metric=metric)\n\n sc.tl.leiden(adata1, key_added=\"leiden_clus_metric\", resolution=resolution)\n sc.tl.leiden(adata2, key_added=\"leiden_clus_metric\", resolution=resolution)\n\n cluster1 = adata1.obs[\"leiden_clus_metric\"].astype(int).values\n cluster2 = adata2.obs[\"leiden_clus_metric\"].astype(int).values\n\n batch1_score = silhouette_samples(\n adata_joint_1.uns[\"neighbors\"][\"distances\"], cluster1\n )\n batch2_score = silhouette_samples(\n adata_joint_2.uns[\"neighbors\"][\"distances\"], cluster2\n )\n\n batch1_score_same = silhouette_score(adata1.uns[\"neighbors\"][\"distances\"], cluster1)\n\n batch2_score_same = silhouette_score(adata2.uns[\"neighbors\"][\"distances\"], cluster2)\n\n return (\n np.mean(batch1_score - batch1_score_same),\n np.mean(batch2_score - batch2_score_same),\n )\n\n\ndef hotspot_score(\n full_adata,\n latent1,\n latent2,\n latent_joint,\n batches,\n k=30,\n subset_features=None,\n weighted_graph=True,\n):\n if subset_features is not None:\n full_adata = full_adata[:, subset_features].copy()\n scaler = StandardScaler()\n joint_data = scaler.fit_transform(full_adata.X).T\n joint_data = pd.DataFrame(joint_data, index=full_adata.var_names)\n b1_data = pd.DataFrame(\n scaler.fit_transform(full_adata.X[batches == 0]).T, index=full_adata.var_names\n )\n b2_data = pd.DataFrame(\n scaler.fit_transform(full_adata.X[batches == 1]).T, index=full_adata.var_names\n )\n\n hs_joint_1 = hotspot.Hotspot(\n b1_data, model=\"none\", latent=pd.DataFrame(latent_joint[batches == 0]),\n )\n\n hs_joint_1.create_knn_graph(\n weighted_graph=weighted_graph, n_neighbors=k,\n )\n hs_joint_1_results = hs_joint_1.compute_autocorrelations(jobs=10)\n\n hs_joint_2 = hotspot.Hotspot(\n b2_data, model=\"none\", latent=pd.DataFrame(latent_joint[batches == 1]),\n )\n\n hs_joint_2.create_knn_graph(\n weighted_graph=weighted_graph, n_neighbors=k,\n )\n hs_joint_2_results = hs_joint_2.compute_autocorrelations(jobs=10)\n\n hs_1 = hotspot.Hotspot(b1_data, model=\"none\", latent=pd.DataFrame(latent1),)\n\n hs_1.create_knn_graph(\n weighted_graph=weighted_graph, n_neighbors=k,\n )\n hs_1_results = hs_1.compute_autocorrelations(jobs=10)\n\n hs_2 = hotspot.Hotspot(b2_data, model=\"none\", latent=pd.DataFrame(latent2),)\n\n hs_2.create_knn_graph(\n weighted_graph=weighted_graph, n_neighbors=k,\n )\n hs_2_results = hs_2.compute_autocorrelations(jobs=10)\n\n hs_1_results = hs_1_results.loc[hs_joint_1_results.index]\n hs_2_results = hs_2_results.loc[hs_joint_2_results.index]\n\n res1 = np.mean((hs_joint_1_results - hs_1_results)[\"Z\"])\n print(np.var((hs_joint_1_results - hs_1_results)[\"Z\"]))\n print(np.var((hs_joint_2_results - hs_2_results)[\"Z\"]))\n res2 = np.mean((hs_joint_2_results - hs_2_results)[\"Z\"])\n\n return 0.5 * (res1 + res2), hs_joint_1_results, hs_1_results\n\n\ndef 
seurat_v3_highly_variable_genes(adata, n_top_genes=4000, use_lowess=False):\n norm_gene_vars = []\n del_batch = False\n if \"batch\" not in adata.obs_keys():\n del_batch = True\n adata.obs[\"batch\"] = np.zeros((adata.X.shape[0]))\n for b in np.unique(adata.obs[\"batch\"]):\n var = adata[adata.obs[\"batch\"] == b].X.var(0)\n print(var.shape)\n mean = adata[adata.obs[\"batch\"] == b].X.mean(0)\n estimat_var = np.zeros((adata.X.shape[1]))\n\n y = np.log10(var)\n x = np.log10(mean)\n if use_lowess is True:\n lowess = sm.nonparametric.lowess\n # output is sorted by x\n v = lowess(y, x, frac=0.15)\n estimat_var[np.argsort(x)] = v[:, 1]\n else:\n estimat_var = loess(y, x)\n\n norm_values = (adata[adata.obs[\"batch\"] == b].X - mean) / np.sqrt(\n 10 ** estimat_var\n )\n print(norm_values.shape)\n # as in seurat paper, clip max values\n norm_values = np.clip(\n norm_values, None, np.sqrt(np.sum(adata.obs[\"batch\"] == b))\n )\n norm_gene_var = norm_values.var(0)\n norm_gene_vars.append(norm_gene_var.reshape(1, -1))\n\n norm_gene_vars = np.concatenate(norm_gene_vars, axis=0)\n ranked_norm_gene_vars = np.argsort(np.argsort(norm_gene_vars, axis=1), axis=1)\n mean_norm_gene_vars = np.mean(norm_gene_vars, axis=0)\n median_ranked = np.median(ranked_norm_gene_vars, axis=0)\n\n num_batches_high_var = np.sum(\n ranked_norm_gene_vars >= (adata.X.shape[1] - n_top_genes), axis=0\n )\n df = pd.DataFrame(index=np.array(adata.var_names))\n df[\"highly_variable_n_batches\"] = num_batches_high_var\n df[\"highly_variable_median_rank\"] = median_ranked\n\n df[\"highly_variable_mean_variance\"] = mean_norm_gene_vars\n df.sort_values(\n [\"highly_variable_n_batches\", \"highly_variable_median_rank\"],\n ascending=False,\n na_position=\"last\",\n inplace=True,\n )\n df[\"highly_variable\"] = False\n df.loc[:n_top_genes, \"highly_variable\"] = True\n df = df.loc[adata.var_names]\n\n if del_batch is True:\n del adata.obs[\"batch\"]\n\n adata.var[\"highly_variable\"] = df[\"highly_variable\"].values\n adata.var[\"highly_variable_n_batches\"] = df[\"highly_variable_n_batches\"].values\n adata.var[\"highly_variable_mean_variance\"] = df[\n \"highly_variable_mean_variance\"\n ].values\n\n\ndef loess(y, x, span=0.3):\n from rpy2.robjects import r\n import rpy2.robjects as robjects\n\n a, b = robjects.FloatVector(x), robjects.FloatVector(y)\n df = robjects.DataFrame({\"a\": a, \"b\": b})\n loess_fit = r.loess(\"b ~ a\", data=df, span=span)\n\n return np.array(loess_fit[loess_fit.names.index(\"fitted\")])\n\n\ndef glm_de(\n protein_adata,\n cell_type_1,\n cell_type_2,\n cell_type_key=\"cell_types\",\n batch_key=\"batch\",\n use_raw=True,\n family=\"Gaussian\",\n):\n group1 = protein_adata.obs[cell_type_key] == cell_type_1\n group2 = protein_adata.obs[cell_type_key] == cell_type_2\n groups = np.logical_or(group1, group2).ravel()\n\n adata_sub = protein_adata.raw.X[groups]\n labels_sub = protein_adata.obs[cell_type_key].values[groups]\n batch_sub = protein_adata.obs[batch_key].values[groups].ravel()\n cell_types_sub = labels_sub == cell_type_1\n pvals, coefs = _glm_fit(adata_sub, batch_sub, cell_types_sub, family=family)\n\n _, pvals_adj, _, _ = multipletests(pvals, alpha=0.05, method=\"fdr_bh\")\n\n df = pd.DataFrame(\n index=protein_adata.var_names, columns=[\"pvals\", \"pvals_adj\", \"coeff\"]\n )\n df[\"pvals\"] = pvals\n df[\"pvals_adj\"] = pvals_adj\n df[\"coeff\"] = coefs\n df = df.sort_values(\"coeff\", ascending=False)\n\n return df\n\n\ndef _glm_fit(protein_data, batch, cell_types, family=\"Gaussian\"):\n import 
statsmodels.api as sm\n import statsmodels.formula.api as smf\n\n # const = np.ones((len(protein_data), 1))\n df = pd.DataFrame()\n df[\"cell_type_1\"] = cell_types\n # ct = cell_types.reshape(-1, 1)\n df[\"batch\"] = batch.ravel() + 1\n\n p_vals = []\n coefs = []\n for p in tqdm(range(protein_data.shape[1])):\n # b = pd.get_dummies(batch).values\n # exog = np.concatenate([ct, b, const], axis=1)\n # print(exog.shape)\n if family == \"Gaussian\":\n f = sm.families.Gaussian()\n y = protein_data[:, p]\n elif family == \"Poisson\":\n f = sm.families.Poisson()\n y = np.expm1(protein_data[:, p])\n else:\n raise ValueError(\"incorrect family\")\n df[\"expression\"] = y\n\n # model = sm.GLM(y, exog, family=f)\n model = smf.glm(\"expression ~ C(cell_type_1) + C(batch)\", data=df, family=f)\n model_results = model.fit()\n p_vals.append(model_results.pvalues[1])\n coefs.append(model_results.params[1])\n\n return np.array(p_vals), np.array(coefs)\n","repo_name":"YosefLab/totalVI_reproducibility","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13405,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"27"}
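A minimal call sketch for the Seurat-v3 HVG helper defined above; the .h5ad path is a placeholder, and use_lowess=True keeps the fit in statsmodels rather than the rpy2/loess path:

import scanpy as sc

adata = sc.read_h5ad("my_data.h5ad")  # placeholder input
seurat_v3_highly_variable_genes(adata, n_top_genes=4000, use_lowess=True)
print(int(adata.var["highly_variable"].sum()))  # roughly n_top_genes genes flagged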
+{"seq_id":"23966897163","text":"import sys\nimport imageio.v2 as iio\nimport itertools\nimport numpy\nimport en_de_cryption as ed\n\n\"\"\"\nNotes in terms of to do: I want to make the encoding better to spread it over 3 pixels so that the data is much less visible when it's being stored in the image\n So I need to catch the exception of the password being wrong and exit with an incorrect password error. \n \n\"\"\"\n\n\ndef convert_text_to_ascii(text):\n text_dec = []\n for character in text:\n text_dec.append(ord(character))\n return text_dec\n\n\ndef convert_ascii_to_text(input_ascii):\n output = \"\"\n for c in input_ascii:\n output += chr(c)\n return output\n\n\ndef encode_in_image(image, text):\n i = 0\n location = [[]]\n for x, y, z in itertools.product(range(image.shape[0]), range(image.shape[1]), range(image.shape[2])):\n if image[x][y - 1][z] == image[x][y][z] and image[x][y][z] + text[i] <= 255 and i < (len(text) - 1):\n image[x][y][z] += text[i]\n location.append([x, y, z])\n i += 1\n elif image[x][y - 1][z] == image[x][y][z] and image[x][y][z] + text[i] <= 255 and i == (len(text) - 1):\n image[x][y][z] += text[i]\n location.append([x, y, z])\n break\n elif text[i] >= 255:\n i += 1\n return location\n\n\ndef better_encode_in_image(image, text):\n i = 0\n location = [[]]\n for x, y, z in itertools.product(range(image.shape[0]), range(image.shape[1]), range(image.shape[2])):\n if i < (len(text) - 1) and all(image[x][y - 1][t] == image[x][y][t] for t in range(0, 3)):\n if text[i] % 3 == 0 and all((255 - image[x][y - 1][t]) > text[i] / 3 for t in range(0, 3)):\n image[x][y][0] += text[i] / 3\n image[x][y][1] += text[i] / 3\n image[x][y][2] += text[i] / 3\n location.append([x, y])\n i += 1\n elif text[i] % 3 == 1 and all((255 - image[x][y - 1][t]) > (text[i] / 3 + 1) for t in range(0, 3)):\n image[x][y][0] += text[i] / 3\n image[x][y][1] += text[i] / 3\n image[x][y][2] += (1 + text[i] / 3)\n location.append([x, y])\n i += 1\n elif text[i] % 3 == 2 and all((255 - image[x][y - 1][t]) > (text[i] / 3 + 1) for t in range(0, 3)):\n image[x][y][0] += text[i] / 3\n image[x][y][1] += (1 + text[i] / 3)\n image[x][y][2] += (1 + text[i] / 3)\n location.append([x, y])\n i += 1\n elif text[i] >= 255:\n i += 1\n return location\n\n\ndef decode_from_image(data_matrix, encoded_image):\n ascii_out = []\n for j in data_matrix:\n x = int(j[0])\n y = int(j[1])\n ascii_out.append(int(encoded_image[x][y][0]) + int(encoded_image[x][y][1]) + int(encoded_image[x][y][2]) - int(encoded_image[x][y - 1][0]) - int(encoded_image[x][y - 1][1]) - int(encoded_image[x][y - 1][2]))\n return ascii_out\n\n\ndef matrix_to_string(matrix):\n out_array = []\n out_string = \"\"\n i = 0\n for j in matrix:\n if i > 0:\n out_array.append(j[0])\n out_array.append(j[1])\n i += 1\n for k in out_array:\n out_string += \"|\" + str(k)\n return out_string\n\n\ndef token_to_location_matrix(input_token):\n input_token = input_token[2:-1]\n try:\n decrypted_string = ed.password_decrypt(input_token.encode(), passwrd).decode()\n except Exception:\n print(\"Incorrect password, please try again with the right password.\")\n sys.exit(0)\n decrypted_split_string = decrypted_string.split(\"|\")[1:]\n out_array = []\n for i in decrypted_split_string:\n out_array.append(int(i))\n out_location = numpy.asarray(out_array)\n no_of_rows = int(len(out_location) / 2)\n out_matrix = out_location.reshape(no_of_rows, 2)\n return out_matrix\n\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n mode = args[0].lower()\n\n if mode == \"encode\":\n passwrd = 
args[3]\n image_path = args[1]\n text_path = args[2]\n\n image_decimal = iio.imread(image_path)\n with open(text_path, 'r') as f:\n text_string = f.read()\n text_decimal = convert_text_to_ascii(text_string)\n encoded_data_location = better_encode_in_image(image_decimal, text_decimal)\n encoded_data_array = matrix_to_string(encoded_data_location)\n encoded_data_array_lock = ed.password_encrypt(encoded_data_array.encode(), passwrd)\n with open(\"encrypted_key.txt\", 'w') as f:\n f.write(str(encoded_data_array_lock))\n iio.imwrite(\"output_image.png\", image_decimal)\n\n elif mode == \"decode\":\n passwrd = args[3]\n encoded_image_path = args[1]\n encoded_data_path = args[2]\n\n encoded_image_decimal = iio.imread(encoded_image_path)\n with open(encoded_data_path, 'r') as f:\n encoded_data_string_lock = f.read()\n encoded_data_matrix = token_to_location_matrix(encoded_data_string_lock)\n output_ascii = decode_from_image(encoded_data_matrix, encoded_image_decimal)\n output_text = convert_ascii_to_text(output_ascii)\n with open(\"output_text.txt\", 'w') as f:\n n = f.write(output_text)\n print(output_text)\n\n elif mode == \"help\":\n print(\"\\n\"\n \"This program encodes a text files into an image please use accordingly.\\n\"\n \"First Parameter: encode, decode, or help\\n\"\n \"Second Parameter: path to the image used for encoding in the case of u-\\n\"\n \"sing the encode option, or path to image to be decoded in a decode opti-\\n\"\n \"on.\\n\"\n \"Third parameter: path to the text file for encoding or path to the file \\n\"\n \"Fourth parameter: password to lock or unlock the encoding location data \\n\"\n \"containing the location of the pixels to be decoded.\\n\")\n else:\n print(\"\\n\"\n \"Please enter a valid input parameter. Use help for more details.\\n\")\n","repo_name":"Jasper-cont/Steganography_in_image","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
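The argv handling above implies this command-line shape; file names are illustrative:

python main.py encode cover.png secret.txt mypassword
  -> writes output_image.png and encrypted_key.txt
python main.py decode output_image.png encrypted_key.txt mypassword
  -> writes output_text.txt and prints the recovered message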
+{"seq_id":"37004912908","text":"# ABC148E - Double Factorial\n# Bootcamp For Beginners - Hard\n# URL: https://atcoder.jp/contests/abc148/tasks/abc148_e\n# Date: 2021/01/13\n\n# ---------- Ideas ----------\n#\n\n# ------------------- Solution --------------------\n# 5**i*2ごとに,10の増え方が繰り上がるので,このリストlをつくる\n# iが大きい順に5**i*2でnを割ってみて,商aが1以上なら,lのi番目をansに加える\n# nを5**i*2で割った余りで更新する\n\n\n# ------------------- Answer --------------------\n#code:python\nn = int(input())\nif n % 2 == 1 or n < 10:\n print(0); exit()\nelse:\n ans = 0\n l = [0]*30\n l[0] = 0\n l[1] = 0\n l[2] = 6\n for i in range(3, 30):\n l[i] = (l[i-1])*5+1\n\n for i in reversed(range(30)):\n a = n // (5**i*2)\n if a > 0:\n ans += l[i]*a\n n %= (5**i*2)\n if n < 50:\n break\n print(ans + n//10)\n\n\n# 解説: https://img.atcoder.jp/abc148/editorial.pdf\n# nが10(=5**1*2)で割れる回数,50(=5**2*2)で割れる回数,250(=5**3*2)で割れる回数をどんどん足していけばいい\nn = int(input())\nif n % 2 == 1 or n < 10:\n print(0)\nelse:\n ans = 0\n for i in range(1, 30):\n ans += n // (5**i*2)\n print(ans)\n\n# ------------------ Sample Input -------------------\n12\n\n5\n\n1000000000000000000\n\n100\n130\n\n# ----------------- Length of time ------------------\n# 67分\n\n# -------------- Editorial / my impression -------------\n# https://img.atcoder.jp/abc148/editorial.pdf\n# とても大変だった\n# 簡単に考えることができなかったため,複雑なコードになっちゃった\n# 解説とやってることは同じ\n# 復習してもよさそう。\n\n# ----------------- Category ------------------\n#AtCoder\n#BootcampForBeginners-hard\n#wanna_review #hard復習 #復習したい\n#ABC-E\n#pで何回割れるか\n#緑diff\n#整数問題\n\n\n\n# 実験\n\n#\n# def prime_factorize(n: int):\n# # 試し割り法による素因数分解\n# # https://en.wikipedia.org/wiki/Trial_division\n# factors = []\n# while n % 2 == 0:\n# factors.append(2)\n# n //= 2\n# f = 3 # 奇数でどんどん割っていって,素数を探す。\n# while f * f <= n:\n# if n % f == 0:\n# factors.append(f)\n# n //= f # nをfで割って減らす。\n# else:\n# f += 2 # 奇数なので+2ずつ足していく。\n# if n != 1: factors.append(n)\n# # Only odd number is possible\n# return factors\n#\n# def give_ans(n):\n# ans = 0\n# for i in range(2, n+1, 2):\n# p = prime_factorize(i)\n# ans += p.count(5)\n# return ans\n# give_ans(250)\n# give_ans(1250)\n# give_ans(5**1)\n# give_ans(5**2)\n# give_ans(5**3)\n# give_ans(5**4)\n# give_ans(5**5)\n# give_ans(5**6)\n# give_ans(5**7)\n# give_ans(5**8)\n","repo_name":"shimmee/competitive-programming","sub_path":"AtCoder/Practice/Bootcamp for Beginners/hard/ABC148E.py","file_name":"ABC148E.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"13839052629","text":"import sys, getopt, math, json, requests, pandas as pd, glob, time\nimport urllib.parse\nfrom linkheader_parser import parse\n\ndef runQuery(query):\n flag = 1\n while flag == 1:\n try:\n print(\"Querying: \" + query )\n headers = {\n 'Authorization': 'Token PASTE_HERE'\n }\n response = requests.request(\"GET\", query, headers=headers)\n txt = json.loads(response.text)\n message = txt['message']\n print(\"I came here!\")\n except (KeyError,TypeError) as e:\n return response\n if \"exceeded\" in message:\n print(\"Limit exceeded: Trying query again in 30 seconds ...\")\n time.sleep(30)\n flag = 1\n else:\n return response\n return response\n \ndef getNextLink(response):\n response.headers.setdefault('Link', 'no-link')\n nextLink = urllib.parse.unquote(response.headers['Link'])\n #print(\"Total records: \",txt['total_count'])\n #print(nextLink)\n if nextLink != \"no-link\":\n result = parse(nextLink) # Parses header as JSON object\n try:\n return result['next']['url']\n except KeyError:\n return \"no-link\"\n return \"no-link\"\n \ndef dumpResponse(response,outputFile):\n with open(outputFile, \"w\") as outfile:\n txt = json.loads(response.text)\n json.dump(txt, outfile)\n print(\"JSON file printed to: \" + outputFile)\n\ndef dumpData(data,outputFile):\n with open(outputFile, \"w\") as outfile:\n json.dump(data, outfile)\n print(\"Object dumped to: \" + outputFile)\n\ndef queryiOSApps(outputFileIOSPath):\n print('Started querying')\n query = \"https://api.github.com/search/code?q=AppDelegate+in:file&page=1\"\n nextLink = \"\"\n apps = []\n while nextLink != \"no-link\":\n response = runQuery(query)\n responseJson = json.loads(response.text)\n for item in responseJson['items']:\n link = item['repository']['html_url'] #looks like: https://github.com/mohammedhossam95/ToDoApp\n commitsUrlToken = link.split(\"https://github.com/\")[1] # looks like: mohammedhossam95/ToDoApp\n temp = commitsUrlToken.split(\"/\") # looks like: {mohammedhossam95,ToDoApp}\n user = temp[0] #looks like: mohammedhossam95\n repo = temp[1] #looks like: ToDoApp\n commitsUrl = \"https://api.github.com/repos/\"+commitsUrlToken+\"/commits\"\n repoRecord = {\"repositoryUrl\":\"NA\",\"author\":\"NA\",\"repository\":\"NA\", \"commitsUrl\":commitsUrl ,\"commits\":\"NotRetrieved\"}\n repoRecord[\"repositoryUrl\"] = link\n repoRecord[\"author\"] = user\n repoRecord[\"repository\"] = repo\n apps.append(repoRecord)\n nextLink = getNextLink(response)\n query = nextLink\n print(nextLink)\n \n data = {\"apps\":apps}\n dumpData(data,outputFileIOSPath)\n \n \ndef fetchAllCommits(query,commitsList,nextLink):\n response = runQuery(query)\n nextLink = getNextLink(response)\n if nextLink == \"no-link\":\n return commitsList\n commitMetaList = json.loads(response.text)\n for commitMeta in commitMetaList:\n commitsList.append(commitMeta['sha'])\n commitsList = fetchAllCommits(nextLink,commitsList,\"\")\n return commitsList\n \ndef queryCommits(inputFileIOSPath,outputUserPath):\n recordNumber = 1\n commitsList = []\n nextLink = \"\"\n with open(inputFileIOSPath, 'r') as openfile:\n iosApps = json.load(openfile)\n for app in iosApps['apps']:\n commitsLink = app['commitsUrl']\n commitsList = []\n commits = fetchAllCommits(commitsLink,commitsList,nextLink) # Recusive method\n app['commits'] = commits\n dumpData(app,outputUserPath + \"/\" + str(recordNumber) + \".json\")\n app['commits'] = len(commits)\n recordNumber = recordNumber + 1\n dumpData(iosApps,outputUserPath + \"/iosAppsCommits.json\")\n\ndef 
queryRateLimit():\n print(runQuery(\"https://api.github.com/rate_limit\").text)\n \ndef main(argv):\n try:\n opts, args = getopt.getopt(argv,\"ho:\")\n except getopt.GetoptError as e:\n print(argv)\n print(\"script.py -o \")\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print(\"script.py -o \")\n sys.exit()\n if opt == '-o':\n outputCsvPath = arg\n \n print(\"outputCsvPath file is: \" + outputCsvPath)\n outputFileIOSPath = outputCsvPath + \"/iosApps.json\"\n testFile = outputCsvPath + \"/test.json\"\n #queryRateLimit()\n queryiOSApps(outputFileIOSPath)\n inputFileIOSPath = outputFileIOSPath\n outputUserPath = outputCsvPath\n #queryCommits(inputFileIOSPath,outputUserPath)\n #response = runQuery(\"https://api.github.com/repos/ArtemVasilenko/vasyaMaps/commits\")\n #reply = json.loads(response.text)\n #dumpData(reply,testFile)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"AbdulAli/open-source-mobile-apps","sub_path":"iosApps/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"21593886764","text":"from .forms import *\nfrom django.shortcuts import render, redirect\nimport json\nfrom django.contrib import messages\nfrom .methods import *\nfrom django.http import HttpResponse\n\n\nwith open(\"web/static/web/data.json\",\"r\") as file:\n data = json.load(file)\n\neng = json.load(open(\"web/static/web/english.json\"))\n\nsearch = None\n\ndef find_matching_keys(dict, value):\n matching_keys = []\n for key in dict:\n if value in key.capitalize():\n matching_keys.append(key)\n elif value.capitalize() in key:\n matching_keys.append(key)\n\n if not matching_keys == []:\n if len(matching_keys) == 1:\n return matching_keys[0]\n else:\n return matching_keys[0],matching_keys[1]\n else:\n return None\n\n\ndef translateh(word):\n word = word.capitalize()\n if word in data:\n return data[word]\n elif word.lower() in data:\n return data[word.lower()]\n elif word.upper() in data:\n return data[word.upper()]\n else:\n return None\n\ndef index(request):\n recent = getrecentHinglish(request)\n if request.method == \"POST\":\n form = getform(request.POST)\n if form.is_valid():\n word = form.cleaned_data[\"word\"]\n search = translateh(word)\n if search is None:\n suggested = find_matching_keys(data, word)\n messages.info(request, f\"{word} is not Present in Hinglish Dictionary\") \n html = render(request, \"web/index.html\", {\n \"form\": getform(),\n \"result\" : search,\n \"word\":word,\n \"suggest\":suggested,\n \"recent\":recent\n })\n setrecentHinglish(request,html,word.capitalize())\n return html\n \n html = render(request, \"web/index.html\", {\n \"form\": getform(),\n \"result\" : search,\n \"word\":word,\n \"recent\":recent\n })\n setrecentHinglish(request,html,word.capitalize())\n return html\n else:\n return render(request, \"web/index.html\", {\n \"form\": form,\n \"recent\":recent\n })\n else:\n return render(request , \"web/index.html\",{\n \"form\": getform(),\n \"recent\":recent,\n })\n\n\ndef engtranslate(word):\n word = word.lower()\n if word in eng:\n return eng[word]\n elif word.title() in eng:\n return eng[word.title()]\n elif word.upper() in eng:\n return eng[word.upper()]\n else:\n return None\n\ndef english(request):\n recent = getrecentEnglish(request)\n if request.method == \"POST\":\n form = getform(request.POST)\n if form.is_valid():\n word = form.cleaned_data[\"word\"]\n search = engtranslate(word)\n if search is None:\n suggested = find_matching_keys(eng, word.capitalize())\n messages.info(request, f\"{word} is not Present in English Dictionary\") \n html = render(request, \"web/index.html\", {\n \"form\": getform(),\n \"result\" : search,\n \"word\":word,\n \"suggest\":suggested,\n \"recent\":recent\n })\n setrecentEnglish(request,html,word.capitalize())\n return html\n html = render(request, \"web/english.html\", {\n \"form\": getform(),\n \"result\" : search,\n \"word\":word,\n \"recent\":recent\n })\n setrecentEnglish(request,html,word)\n return html\n \n else:\n return render(request, \"web/english.html\", {\n \"form\": form,\n \"recent\":recent\n })\n else:\n return render(request , \"web/english.html\",{\n \"form\": getform(),\n \"recent\":recent\n })\n \n\ndef about(request):\n return render(request, \"web/about.html\")\n\n\ndef error_404(request):\n path = request.path\n return render(request, \"web/404.html\",{\n \"path\":path\n })\n\n\ndef suggestion(request):\n if request.method == \"POST\":\n form = suggest_form(request.POST)\n form.save()\n return redirect(\"hinglish\")\n else:\n suggestform = suggest_form()\n return 
render(request, \"web/suggestion.html\", {\n \"suggest\":suggestform\n })\n \n\ndef feedback(request):\n if request.method == \"POST\":\n name = request.POST[\"name\"]\n email = request.POST[\"email\"]\n subject = request.POST[\"subject\"]\n message = request.POST[\"message\"]\n feedback = Feedback(Name = name, Email = email, Subject = subject, Message = message)\n feedback.save()\n subject = 'Thank you For Sharing Your feedback at Hinglish Dictionary'\n message = f'Hi {name}, Thank you for Sharing You Valuable Feedback About Hinglish Dictionary with US. Please Stay Connect With US on Our Website. Hope You are Having a Great Time. You can Visit Our Website from Following Link. Hinglish Dictionary - https://hinglish.pythonanywhere.com'\n sendmail(subject, message, email)\n return redirect(\"hinglish\")\n return render(request, \"web/feedback.html\")\n\n\n# def AddHinglishWord(request):\n# with open(\"web/static/web/exmp.json\",'r') as file:\n# ex = json.load(file)\n# if request.method == \"POST\":\n# form = suggest_form(request.POST)\n# word = form[\"word\"]\n# meaning = form[\"meaning\"]\n# ex[word] = meaning\n# with open(\"web/static/web/exmp.json\", 'a') as js:\n# js.write(ex)\n# js.close()\n# return redirect(\"hinglish\")\n# else:\n# suggestform = suggest_form()\n# return render(request, \"web/suggestion.html\", {\n# \"suggest\":suggestform\n# })","repo_name":"anmolkk/Hinglishh","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"74704956230","text":"from django_tables2 import SingleTableView\nfrom django.views.generic import DetailView, ListView, UpdateView, CreateView\nfrom django.contrib import messages\n\n\nclass commonListView(SingleTableView):\n model = None\n table_class = None\n\n object_name = None\n object_icon = None\n\n template_name = \"business/object_list.html\"\n\n action_new = None\n action_list = None\n\n active_app = None\n active_apptitle = None\n\n table_pagination = {'per_page': 15}\n\n def get_context_data(self, **kwargs):\n context = super(commonListView, self).get_context_data(**kwargs)\n context['active_app'] = self.active_app\n context['active_apptitle'] = self.active_apptitle\n context['object_icon'] = self.object_icon\n context['object_name'] = self.object_name\n if self.action_new:\n context['action_new'] = self.action_new\n if self.action_list:\n context['action_list'] = self.action_list\n return context\n\n\nclass commonCreateView(CreateView):\n model = None\n form_class = None\n\n object_name = None\n object_icon = None\n\n template_name = \"business/object_form.html\"\n success_url = None\n action_list = None\n\n active_app = None\n active_apptitle = None\n\n def get_context_data(self, **kwargs):\n context = super(commonCreateView, self).get_context_data(**kwargs)\n context['mode'] = \"create\"\n context['active_app'] = self.active_app\n context['active_apptitle'] = self.active_apptitle\n context['object_icon'] = self.object_icon\n context['object_name'] = self.object_name\n context['action_list'] = self.action_list\n return context\n\n\nclass commonUpdateView(UpdateView):\n model = None\n form_class = None\n\n object_name = None\n object_icon = None\n\n template_name = \"business/object_form.html\"\n success_url = None\n action_list = None\n\n active_app = None\n active_apptitle = None\n\n def get_context_data(self, **kwargs):\n context = super(commonUpdateView, self).get_context_data(**kwargs)\n context['mode'] = \"update\"\n context['active_app'] = self.active_app\n context['active_apptitle'] = self.active_apptitle\n context['object_icon'] = self.object_icon\n context['object_name'] = self.object_name\n context['action_list'] = self.action_list\n return context\n\n\ndef cleanValue(value, default=\"\"):\n return value if value else default\n\n# class SuccessMessageMixin:\n# \"\"\"\n# Add a success message on successful form submission.\n# \"\"\"\n# success_message = ''\n#\n# def form_valid(self, form):\n# response = super().form_valid(form)\n# success_message = self.get_success_message(form.cleaned_data)\n# if success_message:\n# messages.success(self.request, success_message)\n# return response\n#\n# def get_success_message(self, cleaned_data):\n# return self.success_message % cleaned_data\n#\n#\n# class MessageMixin:\n# \"\"\"\n# Add a message on next screen\n# \"\"\"\n# message = ''\n#\n","repo_name":"five59/django-dtg-store-manager","sub_path":"src/business/helper_backend.py","file_name":"helper_backend.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"}
+{"seq_id":"41849783215","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def middleNode(self, head: ListNode) -> ListNode:\n tail, middle = head, head\n while True:\n if tail == None:\n break\n tail = tail.next\n if tail == None:\n break\n tail = tail.next\n middle = middle.next\n return middle","repo_name":"liooil/leetcode","sub_path":"middle-of-the-linked-list.py","file_name":"middle-of-the-linked-list.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"31236175707","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#Declaramos una funcion que nos devuelva f(x) = exp(-x)* cos (2pi*x)\ndef f(t):\n return t+ 1996.5\n#Definimos el rango de la variable t1 y el intervalo en el que cambia\nt1 = np.arange(0,25,1)\n# Graficamos la variable t1 contra la funcion f(t1)\nplt.plot (t1, f(t1),'b', t1, f(t1),'rD')\n#Le colocamos una leyenda a cada eje\nplt.xlabel('Edad')\nplt.ylabel('anios')\n#Ajustamos los ejes\nplt.axis([0,25, 1995,2020])\n#Guarda la grafica en el formato png\nplt.savefig('Rodrigo.png')\nplt.show()\n","repo_name":"FisicaComputacionalI/20170913-examen-roderickromeoII","sub_path":"rodrigo_romero_de_lazaro.py","file_name":"rodrigo_romero_de_lazaro.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"41402944540","text":"import argparse\nimport os\nfrom pprint import PrettyPrinter\n\nimport numpy as np\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom src.data import ShapeNetDataset, ShuffleDataset, transforms, normalize\nfrom src.data.binvox_rw import Voxels\nfrom src.image2voxel import Image2Voxel\nfrom src.utils import load_config\n\n\ndef save_binvox(voxel, dest, translate, scale):\n binvox = Voxels(voxel, voxel.shape, translate, scale, 'xyz')\n binvox.write(open(dest, 'wb'))\n\n\ndef to_numpy(image):\n image.convert(\"RGB\")\n return [np.asarray(image, dtype=np.float32) / 255]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train transformer conditioned on image inputs')\n parser.add_argument('--annot_path', type=str, required=True,\n help='Path to \"ShapeNet.json\"')\n parser.add_argument('--model_path', type=str, required=True,\n help='Path to the voxel models')\n parser.add_argument('--image_path', type=str, required=True,\n help='Path to the input images')\n parser.add_argument('--batch_size', type=int, default=8,\n help='Batch size for training')\n parser.add_argument('--num_workers', type=int, default=8,\n help='Number of workers for dataloader')\n parser.add_argument('--seed', type=int, default=0,\n help='Manual seed for python, numpy and pytorch')\n parser.add_argument('--split', type=str, default='val',\n help='\"train\", \"test\", or \"val\"')\n parser.add_argument(\"--transformer_config\", type=str, default=None,\n help='Path to the image2voxel config file')\n parser.add_argument(\"--background\", type=int, nargs=3, default=(0, 0, 0),\n help='The (R, G, B) color for the image background')\n parser.add_argument(\"--beam\", type=int, default=1,\n help='Number of beams for generation')\n parser.add_argument(\"--view_num\", type=int, default=1,\n help='Number of views for the image input')\n parser.add_argument(\"--threshold\", type=float, default=0.5,\n help='Threshold for deciding voxel occupancy')\n parser.add_argument(\"--predict\", action='store_true',\n help='Predict and save results')\n parser.add_argument(\"--save_path\", type=str, default=None,\n help='Path to save the prediction')\n\n parser = pl.Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n\n if args.resume_from_checkpoint is None:\n raise ValueError('No checkpoint specified')\n\n pp = PrettyPrinter(indent=4)\n pp.pprint(vars(args))\n\n # =================================================================================\n\n pl.seed_everything(args.seed)\n\n image_trans = transforms.Compose([\n to_numpy,\n transforms.CenterCrop((224, 224), (128, 128)),\n transforms.RandomBackground(((240, 240), (240, 240), (240, 240))),\n transforms.ToTensor(),\n lambda x: x[0],\n normalize\n ])\n\n dataset_params = {\n 'annot_path': args.annot_path,\n 'model_path': args.model_path,\n 'image_path': args.image_path\n }\n dataset = ShapeNetDataset(\n **dataset_params,\n image_transforms=image_trans,\n split=args.split,\n mode='first',\n background=args.background,\n view_num=args.view_num\n )\n\n dataset = ShuffleDataset(dataset)\n loader = DataLoader(\n dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n shuffle=False\n )\n\n # =================================================================================\n\n transformer_config = load_config(args.transformer_config)\n pp.pprint(transformer_config)\n model = Image2Voxel.load_from_checkpoint(\n threshold=args.threshold,\n 
checkpoint_path=args.resume_from_checkpoint,\n        **transformer_config\n    )\n\n    trainer = pl.Trainer.from_argparse_args(args, logger=False)\n\n    if args.predict:\n        if args.save_path is None:\n            raise ValueError('save_path is not specified')\n\n        prediction = trainer.predict(model, loader)\n        for pred_dict in tqdm(prediction):\n            for i in range(len(pred_dict['generation'])):\n                tax_path = os.path.join(args.save_path, pred_dict['taxonomy_id'][i], pred_dict['model_id'][i])\n\n                if not os.path.isdir(tax_path):\n                    os.makedirs(tax_path)\n\n                voxel = pred_dict['generation'][i][0].cpu().numpy()\n                save_binvox(\n                    voxel.astype(bool),  # plain bool: np.bool was removed in NumPy 1.24\n                    os.path.join(tax_path, 'prediction.binvox'),\n                    pred_dict['translate'][i].cpu().numpy(),\n                    pred_dict['scale'][i].cpu().numpy(),\n                )\n    else:\n        trainer.test(model, loader)\n","repo_name":"fomalhautb/3D-RETR","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"27"}
+{"seq_id":"30768516517","text":"import xlsxwriter\nimport json\nfrom datajson import datajson\nfrom Counter import countword\nfrom counterregex import finder\nfrom pathlib import Path\nimport os\n\n\nurl = 'https://ieeexplore.ieee.org/document/'\npath = \"C:\\\\Users\\\\prav\\\\PycharmProjects\\\\SVM\\\\PaperClassifier\\\\Papers\\\\\"\n\nOfile='Gesture_recognition_paper'\n\na='Clustering'\nb='Partitional'\ncell_content = ['IEEEID', 'Bibtex']\n\nalgo = {}\n\n\ntry:\n f = open('data\\\\'+Ofile+'.json', )\n Papers = json.load(f)\n\n\nexcept:\n Papers = {}\n Paperjson = json.dumps(Papers)\n f = open('data\\\\'+Ofile+'.json', \"w\")\n f.write(Paperjson)\n f.close()\n\n\n\npaperName=[]\n\n\ndatajson(path,Papers,url,Ofile)\n\nword={}\n\n\nbib = open(\"bib.txt\", \"w\")\n\n\nranking={}\n\nstr_path='file:///C:/Users/prav/PycharmProjects/SVM/PaperClassifier/Papers/'\nfor key,value in Papers.items():\n path = (str_path+key+'.pdf')\n Counts = finder(a,b,str(Papers[key]))\n word[str(value)] = Counts,path,value[2],path\n bib.write(value[1] + \"\\n\")\n\n\nfor k,v in word.items():\n\n print (v[0],v[2],v[3])\n\n\n\n\n\n\n\n","repo_name":"PraveerT/PDFClassifier","sub_path":"pdfclassifier2.py","file_name":"pdfclassifier2.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"29955999427","text":"from OSPF import OSPF\nimport time\n\nospfAAddress = (\"127.0.0.1\", 6785)\nospfBAddress = (\"127.0.0.1\", 6786)\nospfCAddress = (\"127.0.0.1\", 6787)\nospfDAddress = (\"127.0.0.1\", 6788)\nospfEAddress = (\"127.0.0.1\", 6789)\nfilename = input(\"Input the topology filename: \")\nospfB = OSPF(ospfBAddress, filename)\n\n# while True:\n# print(ospfB.distanceVector)\n# print(\"\")\n\n# for item in ospfB.adjMatrix:\n# print(item)\n# print(ospfB.adjMatrix[item])\n# print(\"\")\n\n# time.sleep(2)\n","repo_name":"xihuai18/Virtual-Routing","sub_path":"OSPF/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"22117395918","text":"import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport time\nurl = \"https://www.doviz.com/\"\nresponse = requests.get(url) #Get fonksiyonu sayfanın özelliklerini verir.\nhtml_içeriği = response.content\nsoup = BeautifulSoup(html_içeriği,\"html.parser\")\nAltın = 0\nUSD = 0\nBorsa = 0\n\n\nfor i in soup.find_all(\"span\",{\"data-socket-key\":\"gram-altin\",\"data-socket-attr\":\"s\"}):\n for j in i:\n Altın = j\n print(j)\nfor i in soup.find_all(\"span\",{\"data-socket-key\":\"USD\",\"data-socket-attr\":\"s\"}):\n for j in i:\n USD = j\n print(j)\nfor i in soup.find_all(\"span\",{\"data-socket-key\":\"XU100\",\"data-socket-attr\":\"s\"}):\n for j in i:\n Borsa = j\n print(j)\nwhile True:\n\n print(\"Gram Altın = {}\".format(Altın))\n\n print(\"USD = {}\".format(USD))\n\n print(\"BORSA = {}\".format(Borsa))\n\n print(\"\"\"\"\n Lütfen İşlem Seçiniz:\n 1) Param İle Kaç Gram Altın Alabilirim?\n 2) Param Kaç USD'ye eşit?\n 3) Tayyip ne zaman ölür?\n 4) Her bir basış hükümete bir beddua...\n \"\"\")\n\n işlem = int(input(\"Lütfen işlem seçiniz:\"))\n if (işlem == 1):\n a = int(input(\"Lütfen paranızı giriniz\"))\n b = a // Altın\n print(\"Hesaplanıyor...\")\n time.sleep(2)\n print(\"Paranız ile tam {} altın alabilirsiniz.\".format(b))\n time.sleep(2)\n elif(işlem == 2):\n a = int(input(\"Lütfen paranızı giriniz\"))\n print(\"Hesaplanıyor...\")\n time.sleep(2)\n print(\"Paranız tam {} Amerikan Dolarına eşit\".format(a / USD))\n timeçsleep(2)\n elif(işlem == 3):\n print(\"Umarım en kısa zamanda olurrrrr...\")\n time.sleep(2)\n elif(işlem == 4):\n print(\"Kahrol tayyip al sana bombe\")\n time.sleep(2)\n else:\n print(\"Hatalı işlem girdiniz\")\n continue\n\n\n\n","repo_name":"beyazitt/stunning-octo-telegram","sub_path":"Desktop/Beyaz/pycharm projects/Ödevler/Döviz.py","file_name":"Döviz.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"35873864930","text":"class Solution:\n def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:\n indegree=[0]*(n)\n for a,b in edges:\n indegree[b]+=1\n res=[]\n for index,val in enumerate(indegree):\n if val==0:\n res.append(index)\n return res","repo_name":"aryanrai-vit/Leetcode","sub_path":"1661-minimum-number-of-vertices-to-reach-all-nodes/1661-minimum-number-of-vertices-to-reach-all-nodes.py","file_name":"1661-minimum-number-of-vertices-to-reach-all-nodes.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"15013971999","text":"import time\n\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome(executable_path='chromedriver.exe')\n\ndriver.get('https://www.roboform.com/filling-test-all-fields')\n\n#inputboxes = driver.find_elements_by_class_name('col-xs-6')\n#print(len(inputboxes))\n\ntitle = driver.find_element_by_name('01___title')\ntitle.send_keys('Fuck OFF')\n\ntime.sleep(5)\ndriver.close()","repo_name":"aquibsid/learning","sub_path":"Selenium/4_inputboxes.py","file_name":"4_inputboxes.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"12833449924","text":"from typing import Optional\n\nfrom .aws import Action as BaseAction\nfrom .aws import BaseARN\n\nservice_name = \"AWS service providing managed private networks\"\nprefix = \"private-networks\"\n\n\nclass Action(BaseAction):\n def __init__(self, action: Optional[str] = None) -> None:\n super().__init__(prefix, action)\n\n\nclass ARN(BaseARN):\n def __init__(self, resource: str = \"\", region: str = \"\", account: str = \"\") -> None:\n super().__init__(\n service=prefix, resource=resource, region=region, account=account\n )\n\n\nAcknowledgeOrderReceipt = Action(\"AcknowledgeOrderReceipt\")\nActivateDeviceIdentifier = Action(\"ActivateDeviceIdentifier\")\nActivateNetworkSite = Action(\"ActivateNetworkSite\")\nConfigureAccessPoint = Action(\"ConfigureAccessPoint\")\nCreateNetwork = Action(\"CreateNetwork\")\nCreateNetworkSite = Action(\"CreateNetworkSite\")\nDeactivateDeviceIdentifier = Action(\"DeactivateDeviceIdentifier\")\nDeleteNetwork = Action(\"DeleteNetwork\")\nDeleteNetworkSite = Action(\"DeleteNetworkSite\")\nGetDeviceIdentifier = Action(\"GetDeviceIdentifier\")\nGetNetwork = Action(\"GetNetwork\")\nGetNetworkResource = Action(\"GetNetworkResource\")\nGetNetworkSite = Action(\"GetNetworkSite\")\nGetOrder = Action(\"GetOrder\")\nListDeviceIdentifiers = Action(\"ListDeviceIdentifiers\")\nListNetworkResources = Action(\"ListNetworkResources\")\nListNetworkSites = Action(\"ListNetworkSites\")\nListNetworks = Action(\"ListNetworks\")\nListOrders = Action(\"ListOrders\")\nListTagsForResource = Action(\"ListTagsForResource\")\nPing = Action(\"Ping\")\nStartNetworkResourceUpdate = Action(\"StartNetworkResourceUpdate\")\nTagResource = Action(\"TagResource\")\nUntagResource = Action(\"UntagResource\")\nUpdateNetworkSite = Action(\"UpdateNetworkSite\")\nUpdateNetworkSitePlan = Action(\"UpdateNetworkSitePlan\")\n","repo_name":"cloudtools/awacs","sub_path":"awacs/private_networks.py","file_name":"private_networks.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":389,"dataset":"github-code","pt":"27"}
+{"seq_id":"43298426223","text":"from delete_turtle import delete_turtle\nfrom teleport_turtle import teleport_turtle\nfrom settings import applesRequired, screen_width, screen_height\n\ndef write_points(turtle, value):\n turtle.speed(100)\n delete_turtle(turtle)\n x = int(screen_width / 2 * -1) + 30\n y = int(screen_height / 2) - 30\n teleport_turtle(turtle, x, y - 20)\n turtle.color(\"#fff\")\n turtle.write(f\"{value}/{applesRequired}\", font=(\"Arial\", 12, \"bold\"))","repo_name":"EricNunes0/Turtle-Game","sub_path":"write_points.py","file_name":"write_points.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"13791627843","text":"from __future__ import division\nfrom __future__ import print_function\nfrom flask import Flask, render_template, request\nfrom waitress import serve\nfrom src.Image_Processing import Image_Processing\nimport argparse\nimport editdistance\nfrom src.DataLoader import DataLoader, Batch\nfrom src.SamplePreprocessor import preprocess\nfrom src.Model import Model, DecoderType\nimport cv2\nimport os\nimport src.word_seg\nimport src.dictionary_test\n#import src.Tess\nimport flask\nimport sys\nfrom src.Image_Processing import Image_Processing\n\n#Tess references\nfrom spellchecker import SpellChecker\nimport malaya\nfrom symspellpy import SymSpell, Verbosity\nimport pytesseract\nimport cv2\nimport pkg_resources\nimport main\nspell = SpellChecker()\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\n\nsym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)\ndictionary_path = pkg_resources.resource_filename(\n \"symspellpy\", \"frequency_dictionary_en_82_765.txt\")\nbigram_path = pkg_resources.resource_filename(\n \"symspellpy\", \"frequency_bigramdictionary_en_243_342.txt\")\n# term_index is the column of the term and count_index is the\n# column of the term frequency\nsym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)\nsym_spell.load_bigram_dictionary(bigram_path, term_index=0, count_index=2)\n# prob_corrector = malaya.spell.symspell()\nprob_corrector = malaya.spell.probability()\n\n\n\n\n\n\napp = Flask(__name__)\napp.config['DEBUG'] == True\n\nclass FilePaths:\n \"filenames and paths to data\"\n fnCharList = 'model/charList.txt'\n fnAccuracy = 'model/accuracy.txt'\n fnTrain = 'data/'\n fnCorpus = 'data/corpus.txt'\n\n\ndef getint(name):\n num, _ = name.split('.')\n return int(num)\n\ndef train(model, loader):\n \"train NN\"\n epoch = 0 # number of training epochs since start\n bestCharErrorRate = float('inf') # best valdiation character error rate\n noImprovementSince = 0 # number of epochs no improvement of character error rate occured\n earlyStopping = 5 # stop training after this number of epochs without improvement\n while True:\n epoch += 1\n print('Epoch:', epoch)\n\n # train\n print('Train NN')\n loader.trainSet()\n while loader.hasNext():\n iterInfo = loader.getIteratorInfo()\n batch = loader.getNext()\n loss = model.trainBatch(batch)\n print('Batch:', iterInfo[0], '/', iterInfo[1], 'Loss:', loss)\n\n # validate\n charErrorRate = validate(model, loader)\n\n # if best validation accuracy so far, save model parameters\n if charErrorRate < bestCharErrorRate:\n print('Character error rate improved, save model')\n bestCharErrorRate = charErrorRate\n noImprovementSince = 0\n model.save()\n open(FilePaths.fnAccuracy, 'w').write('Validation character error rate of saved model: %f%%' % (charErrorRate * 100.0))\n else:\n print('Character error rate not improved')\n noImprovementSince += 1\n\n # stop training if no more improvement in the last x epochs\n if noImprovementSince >= earlyStopping:\n print('No more improvement since %d epochs. Training stopped.' 
% earlyStopping)\n break\n\n\ndef validate(model, loader):\n \"validate NN\"\n print('Validate NN')\n loader.validationSet()\n numCharErr = 0\n numCharTotal = 0\n numWordOK = 0\n numWordTotal = 0\n while loader.hasNext():\n iterInfo = loader.getIteratorInfo()\n print('Batch:', iterInfo[0], '/', iterInfo[1])\n batch = loader.getNext()\n (recognized, _) = model.inferBatch(batch)\n\n print('Ground truth -> Recognized')\n for i in range(len(recognized)):\n numWordOK += 1 if batch.gtTexts[i] == recognized[i] else 0\n numWordTotal += 1\n dist = editdistance.eval(recognized[i], batch.gtTexts[i])\n numCharErr += dist\n numCharTotal += len(batch.gtTexts[i])\n print('[OK]' if dist == 0 else '[ERR:%d]' % dist, '\"' + batch.gtTexts[i] + '\"', '->', '\"' + recognized[i] + '\"')\n\n # print validation result\n charErrorRate = numCharErr / numCharTotal\n wordAccuracy = numWordOK / numWordTotal\n print('Character error rate: %f%%. Word accuracy: %f%%.' % (charErrorRate * 100.0, wordAccuracy * 100.0))\n return charErrorRate\n\n\ndef infer(model, fnImg):\n \"recognize text in image provided by file path\"\n\n img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)\n\n batch = Batch(None, [img])\n (recognized, probability) = model.inferBatch(batch, True)\n return recognized[0], probability[0]\n\n\n\n\ndef run(filename):\n \"main function\"\n # optional command line args\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', help='train the NN', action='store_true')\n parser.add_argument('--validate', help='validate the NN', action='store_true')\n parser.add_argument('--beamsearch', help='use beam search instead of best path decoding', action='store_true')\n parser.add_argument('--wordbeamsearch', help='use word beam search instead of best path decoding', action='store_true')\n parser.add_argument('--dump', help='dump output of NN to CSV file(s)', action='store_true')\n\n args = parser.parse_args()\n\n decoderType = DecoderType.BestPath\n if args.beamsearch:\n decoderType = DecoderType.BeamSearch\n elif args.wordbeamsearch:\n decoderType = DecoderType.WordBeamSearch\n\n # train or validate on IAM dataset\n if args.train or args.validate:\n # load training data, create TF model\n loader = DataLoader(FilePaths.fnTrain, Model.batchSize, Model.imgSize, Model.maxTextLen)\n\n # save characters of model for inference mode\n open(FilePaths.fnCharList, 'w').write(str().join(loader.charList))\n\n # save words contained in dataset into file\n open(FilePaths.fnCorpus, 'w').write(str(' ').join(loader.trainWords + loader.validationWords))\n\n # execute training or validation\n if args.train:\n model = Model(loader.charList, decoderType)\n train(model, loader)\n elif args.validate:\n model = Model(loader.charList, decoderType, mustRestore=True)\n validate(model, loader)\n\n # infer text on test image\n else:\n index_list = []\n result_list = []\n prob_list = []\n print(open(FilePaths.fnAccuracy).read())\n model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore=True, dump=args.dump)\n\n for dirpath, dirnames, files in os.walk('../output_words/' + filename, topdown=False):\n for sub_file in sorted(files, key=getint):\n img_path = dirpath + '/' + sub_file\n # print('---------------------------------------------------')\n index_number, _ = str(sub_file).split('.')\n # print(\"File path: \"+img_path)\n try:\n result, prob = infer(model, img_path)\n except ValueError:\n print(\"Value error\")\n continue\n # print(index_number, result, prob)\n index_list.append(index_number)\n 
result_list.append(result)\n prob_list.append(prob)\n\n return index_list, result_list, prob_list\n\n\n\n\n#tess function \ndef tessract_test(img_path, filename):\n img_cv = cv2.imread(img_path)\n img_rgb = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)\n # dic = pytesseract.image_to_data(img_rgb, lang='eng', output_type='data.frame')\n dic = pytesseract.image_to_data(img_rgb, lang='eng', output_type='dict')\n print(\"Recognition with pyTesseract only\")\n print(pytesseract.image_to_string(img_rgb, lang='eng'))\n left_list = dic['left']\n top_list = dic['top']\n width_list = dic['width']\n height_list = dic['height']\n confident_list = dic['conf']\n text_list = dic['text']\n word_num_list = dic['word_num']\n\n filename_output = filename\n for i in range(len(confident_list)):\n if 90 > int(confident_list[i]) >= 0:\n # print(word_num_list[i], left_list[i], top_list[i], width_list[i], height_list[i], confident_list[i], text_list[i])\n x = int(top_list[i])\n y = int(left_list[i])\n w = int(width_list[i])\n h = int(height_list[i])\n # cv2.rectangle(img_cv, (y, x), (y + w, x + h), (0, 0, 0), 3)\n image_to_show = img_cv[x:x + h, y:y + w]\n # cv2.namedWindow('CROP IMAGE', cv2.WINDOW_NORMAL)\n # cv2.imshow(\"CROP IMAGE\", image_to_show)\n cv2.imwrite('../output_words/' + filename_output + '/%d.png' % i, image_to_show) # save word\n # cv2.waitKey()\n return word_num_list, text_list, confident_list\n\n\n#htrengine function\ndef htrengine():\n try:\n #file = request.files['img']\n #return file.filename\n the_filename = \"\"\n for dirpath, dirnames, files in os.walk('input_words/', topdown=False):\n for sub_file in sorted(files):\n img_path = dirpath + sub_file\n the_filename, _ = sub_file.split('.')\n try:\n os.mkdir('output_words/' + the_filename)\n except FileExistsError:\n print(\"Cannot read image because file exist already\")\n #os.mkdir('../output_words/' + the_filename+1)\n continue\n \n word_num, word_text, conf_word = tessract_test(img_path, the_filename) # this will produce output and word segmentation\n index, result, prob = run(the_filename) # NN engine can only run once, need to refactor to place it out of this loop\n # Comparison of score between pytesseract model & NN mdel\n for i in range(len(index)):\n if int(conf_word[int(index[i])]) > 0:\n score = int(conf_word[int(index[i])])/100\n else:\n score = int(conf_word[int(index[i])])\n #print(float(prob[i]),float(score))\n # if probabilty or model higher than pytesseract use, model!\n if float(prob[i]) > float(score):\n print(\"REPLACE prob comparison....................\", word_text[int(index[i])],\">\", result[i])\n word_text[int(index[i])] = result[i]\n else:\n # Feed the pytesseract word into dictionary(symspell)\n pyword = word_text[int(index[i])]\n word_correct = src.dictionary_test.spellcheck(pyword) # can adjust to different dictionery\n print(\"REPLACE with dictionary.........................\", word_text[int(index[i])],\">\", word_correct)\n word_text[int(index[i])] = word_correct \n # Tabulation of text\n the_text = \"\"\n newline = '\\n'\n for i in range(len(word_num)):\n if word_num[i] == 0:\n the_text += newline\n else:\n the_text += str(word_text[i]) + \" \"\n print(\"Correction with handwriting model & dictionary..............................................\")\n print(the_text)\n print(\"Working!\")\n return the_text\n except:\n print(sys.exc_info()[0])\n return \"wRONG INPUT\"\n \n\n\n#---Define routes---\n#Home route\n@app.route('/', methods=['GET'])\ndef home():\n #return htrengine()\n return 
render_template('home.html')\n\n#process image route\n@app.route('/process_image', methods=['POST'])\ndef process_image():\n return htrengine()\n\n\nserve(app, host='0.0.0.0', port=700) \n\n \nif __name__ == '__main__':\n #htrengine()\n print(\"App is running!\")\n\n ","repo_name":"Zobaid/ImagetoText","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"42995195890","text":"X = int(input())\r\nN = int(input())\r\n\r\nlist = []\r\n\r\nfor i in range(1,N+1):\r\n a, b = input().split()\r\n a = int(a)\r\n b = int(b)\r\n A = (a*b)\r\n list.append(A) \r\n\r\nif sum(list) == X:\r\n print(\"Yes\")\r\nelse:\r\n print(\"No\")","repo_name":"dydrb/springboard_for_growth","sub_path":"백준/Bronze/25304. 영수증/영수증.py","file_name":"영수증.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"33592298386","text":"from pyspark import SparkContext, SparkConf\r\nimport os\r\n\r\n\r\nos.environ[\"JAVA_HOME\"] = '/usr/local/jdk-15.0.1'\r\nconf = SparkConf().setAppName(\"test\")\r\nsc = SparkContext(conf=conf)\r\n\r\nrdd = sc.parallelize([1,2,3,4,5,6,7,8,10], 2)\r\n\r\n\r\naccumu = sc.accumulator(0)\r\n\r\nprint(accumu)\r\n\r\ndef map_func(data):\r\n global accumu\r\n accumu += 1\r\n print(\"count = \", accumu )\r\n\r\n\r\nprint(rdd.map(map_func).collect())\r\nprint(\"count = \", accumu.value)\r\n","repo_name":"MichaelZhangs/practise_own","sub_path":"pyspark_learn/code/rdd_accumulator.py","file_name":"rdd_accumulator.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"38340584063","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import Menu\r\nimport os\r\nimport pyaudio\r\nimport wave\r\nimport numpy as np\r\nimport threading\r\nfrom matplotlib.backends.backend_tkagg import (\r\n FigureCanvasTkAgg, NavigationToolbar2Tk)\r\nfrom matplotlib.figure import Figure\r\nfrom zipfile import ZipFile\r\nfrom scipy.io.wavfile import write\r\nfrom os import remove\r\nimport time\r\nfrom scipy.fft import rfft\r\nfrom PIL import Image, ImageTk\r\nimport sounddevice as sd\r\nimport struct\r\nimport matplotlib.pyplot as plt\r\n\r\nCHUNK = 1024\r\nFORMAT = pyaudio.paInt16\r\nCHANNELS = 1\r\nRATE = 44100\r\nRECORD_SECONDS = 1\r\nWAVE_OUTPUT_FILENAME = \"output.wav\"\r\nPLAY_RANGE = 5\r\n\r\nframes = []\r\nrecording = False\r\nwavFile = []\r\nfourier_frames = []\r\nvec_fourier_frames = []\r\nexternal_wav_path = \"\"\r\n\r\n\r\ndef to_atm(chunksList, wavFilePath):\r\n file = open(\"chunks\", \"wb\")\r\n content = array_to_bytes(chunksList)\r\n file.write(content)\r\n file.close\r\n global frames\r\n with ZipFile('file.atm', 'w') as zip:\r\n zip.write('chunks')\r\n zip.write(wavFilePath)\r\n print(chunksList)\r\n try:\r\n os.remove(\"chunks\")\r\n except:\r\n print(\"File already deleted\")\r\n\r\n\r\ndef from_atm(filepath):\r\n with ZipFile(filepath) as zip:\r\n files = zip.namelist()\r\n for i in range(0, len(files)):\r\n if (\".wav\" in files[i]):\r\n global wavFile\r\n zip.extract(files[i])\r\n wavFile = open_wav_file(files[i])\r\n elif (files[i] == \"chunks\"):\r\n global frames\r\n frames = bytes_to_array(zip.read(files[i]))\r\n\r\n print(type(frames))\r\n print(frames)\r\n\r\n\r\ndef array_to_bytes(x):\r\n np_bytes = BytesIO()\r\n np.save(np_bytes, x, allow_pickle=True)\r\n return np_bytes.getvalue()\r\n\r\n\r\ndef bytes_to_array(b):\r\n np_bytes = BytesIO(b)\r\n return np.load(np_bytes, allow_pickle=True)\r\n\r\n\r\ndef open_wav_file(file):\r\n return wave.open(file, 'rb')\r\n\r\n\r\nclass Autrumn(tk.Tk):\r\n def __init__(self):\r\n tk.Tk.__init__(self)\r\n self.title(\"Autrumn\")\r\n\r\n # create a menubar\r\n menubar = Menu(self)\r\n self.config(menu=menubar)\r\n\r\n # create a menu\r\n autrumn_menu = Menu(menubar, tearoff=False)\r\n\r\n autrumn_menu.add_command(label='Analizador', command=lambda: self.show_frame(Analizador))\r\n autrumn_menu.add_command(label='Reproductor', command=lambda: self.show_frame(Reproductor))\r\n autrumn_menu.add_separator()\r\n autrumn_menu.add_command(\r\n label='Exit',\r\n command=self.destroy\r\n )\r\n\r\n menubar.add_cascade(\r\n label=\"Menú\",\r\n menu=autrumn_menu\r\n )\r\n\r\n self.geometry(\"700x1200\")\r\n\r\n container = tk.Frame(self)\r\n container = tk.Frame(self)\r\n container.grid(row=1, column=1, padx=10, pady=10)\r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n\r\n self.frames = {}\r\n\r\n for F in (Analizador, Reproductor):\r\n frame = F(container, self)\r\n self.frames[F] = frame\r\n frame.grid(row=0, column=0, sticky=\"nsew\")\r\n\r\n self.show_frame(Analizador)\r\n\r\n def show_frame(self, cont):\r\n frame = self.frames[cont]\r\n frame.tkraise()\r\n\r\n\r\nclass Analizador(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n\r\n top_frame = tk.Frame(self, width=200, height=400, bg='grey')\r\n label = ttk.Label(top_frame, text=\"Analizador\", font=\"Consolas\")\r\n top_frame.grid(row=0, column=0, padx=10, pady=5)\r\n label.grid(row=0, column=0, padx=10, 
pady=10)\r\n\r\n self.fig = Figure(figsize=(5, 1), dpi=100)\r\n self.fig.add_subplot(111).plot(frames)\r\n self.fig2 = Figure(figsize=(5, 3), dpi=100)\r\n self.fig2.add_subplot(111).hist(fourier_frames, bins=100)\r\n self.ax = 0\r\n self.entry = ttk.Entry(top_frame)\r\n self.entry.grid(row=0, column=1, padx=10, pady=10)\r\n\r\n self.btn_load = ttk.Button(top_frame, text=\"Load\",\r\n command=self.start_loading_thread)\r\n self.btn_load.grid(row=1, column=1, padx=10, pady=10)\r\n\r\n start_recording_button = ttk.Button(\r\n top_frame,\r\n text='Start recording',\r\n compound=tk.LEFT,\r\n command=self.start_recording_thread\r\n )\r\n start_recording_button.grid(row=0, column=2, padx=10, pady=10)\r\n\r\n stop_recording_button = ttk.Button(\r\n top_frame,\r\n text='Stop recording',\r\n compound=tk.LEFT,\r\n command=self.recordingAudio\r\n )\r\n stop_recording_button.grid(row=0, column=3, padx=10, pady=10)\r\n\r\n open_audio_button = ttk.Button(\r\n top_frame,\r\n text='Open audio',\r\n compound=tk.LEFT,\r\n command=self.recordingAudio\r\n )\r\n open_audio_button.grid(row=0, column=4, padx=10, pady=10)\r\n\r\n self.frame1 = tk.Frame(self)\r\n self.frame2 = tk.Frame(self)\r\n\r\n self.canvas = FigureCanvasTkAgg(self.fig, self)\r\n self.toolbar = NavigationToolbar2Tk(self.canvas, self.frame1)\r\n self.canvas2 = FigureCanvasTkAgg(self.fig2, self)\r\n self.toolbar2 = NavigationToolbar2Tk(self.canvas2, self.frame2)\r\n\r\n self.frame1.grid(row=4, column=0, padx=10, pady=10)\r\n self.canvas.get_tk_widget().grid(row=5, column=0, padx=10, pady=10)\r\n self.frame2.grid(row=6, column=0, padx=10, pady=10)\r\n self.canvas2.get_tk_widget().grid(row=7, column=0, padx=10, pady=10)\r\n\r\n def start_loading_thread(self):\r\n threading.Thread(target=self.load_data).start()\r\n\r\n def load_data(self):\r\n file = self.entry.get()\r\n print(file)\r\n global frames\r\n frames = []\r\n if(file != \"\"):\r\n global frames\r\n global external_wav_path\r\n external_wav_path = file\r\n print(external_wav_path)\r\n wavFile = wave.open(external_wav_path, 'rb')\r\n frames = []\r\n while(True):\r\n data = wavFile.readframes(CHUNK)\r\n if(len(data) > 0):\r\n frames.append(data)\r\n else:\r\n break\r\n wavFile.close()\r\n\r\n def start_recording_thread(self):\r\n threading.Thread(target=self.recordingAudio).start()\r\n\r\n def recordingAudio(self):\r\n global recording\r\n recording = True\r\n p = pyaudio.PyAudio()\r\n stream = p.open(format=FORMAT, channels=CHANNELS,\r\n rate=RATE, input=True, frames_per_buffer=CHUNK)\r\n print(\"Recording...\")\r\n while(recording):\r\n data = stream.read(CHUNK)\r\n frames.append(data)\r\n print(\"Finished recording\")\r\n stream.stop_stream()\r\n stream.close()\r\n p.terminate()\r\n to_atm(frames, \"C:/Users/maxim/Downloads/\")\r\n\r\n def fft(self):\r\n global fourier_frames\r\n fourier_frames = []\r\n global frames\r\n print(\"Calculating Fourier Transform...\")\r\n for i in range(0, len(frames)):\r\n fourier = rfft(frames[i])\r\n fourier_frames.append(fourier)\r\n print(\"Finished Fourier Transform\")\r\n\r\n\r\nclass Reproductor(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n\r\n top_frame = tk.Frame(self, width=200, height=400, bg='grey')\r\n label = ttk.Label(top_frame, text=\"Reproductor\", font=\"Consolas\")\r\n top_frame.grid(row=0, column=0, padx=10, pady=5)\r\n label.grid(row=0, column=0, padx=10, pady=10)\r\n\r\n play_audio_button = ttk.Button(\r\n top_frame,\r\n text='Play audio',\r\n compound=tk.LEFT,\r\n command=lambda: 
self.play_audio()\r\n        )\r\n        play_audio_button.grid(row=0, column=1, padx=10, pady=10)\r\n\r\n        self.audio_pos = 0\r\n        self.play_audio_thread = None\r\n        self.isPlaying = False\r\n\r\n        self.frame1 = tk.Frame(self)\r\n        self.fig = Figure(figsize=(5, 1), dpi=100)  # empty figure for the waveform canvas below\r\n        self.canvas = FigureCanvasTkAgg(self.fig, self)\r\n        self.toolbar = NavigationToolbar2Tk(self.canvas, self.frame1)\r\n\r\n        self.frame1.grid(row=4, column=0, padx=10, pady=10)\r\n        self.canvas.get_tk_widget().grid(row=5, column=0, padx=10, pady=10)\r\n\r\n    def play_audio(self):\r\n        global frames\r\n        global external_wav_path\r\n        print(\"Playing audio...\")\r\n        self.isPlaying = True\r\n        p = pyaudio.PyAudio()\r\n        wf = wave.open(external_wav_path, 'rb')\r\n        print(\"wf.getnframes(): \" + str(wf.getnframes()))\r\n        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n                        channels=wf.getnchannels(),\r\n                        rate=wf.getframerate(),\r\n                        output=True)\r\n        chunk = 1024\r\n\r\n        data = wf.readframes(chunk)\r\n\r\n        while data != b'' and self.isPlaying:\r\n            stream.write(data)\r\n            data = wf.readframes(chunk)\r\n        stream.stop_stream()\r\n        stream.close()\r\n        p.terminate()\r\n        print(\"Finished playing audio...\")\r\n\r\n    # renamed so the method does not clash with the self.play_audio_thread attribute set above\r\n    def start_play_audio_thread(self):\r\n        self.play_audio_thread = threading.Thread(target=self.play_audio)\r\n        self.play_audio_thread.start()\r\n\r\n\r\nventana = Autrumn()\r\nventana.mainloop()\r\n","repo_name":"MaxAndre22/2023-02-2020087577-IC7602","sub_path":"Autrum.py","file_name":"Autrum.py","file_ext":"py","file_size_in_byte":9410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"24224394904","text":"data = open('input', 'r').read().split('\\n')\n\ndata.pop(0)\n\nx = 0\ncount = 0\nfor line in data:\n x += 3\n while x > len(line):\n line += line\n if line[x] == \"#\":\n count += 1\nprint(count)\n","repo_name":"Mysterken/AdventOfCode2020","sub_path":"D3/Day3-1.py","file_name":"Day3-1.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"74092376391","text":"from django import forms\nfrom myapp.models import Order, Review\nclass SearchForm(forms.Form):\n LENGTH_CHOICES = [\n (8, '8 Weeks'),\n (10, '10 Weeks'),\n (12, '12 Weeks'),\n (14, '14 Weeks'),\n ]\n name = forms.CharField(max_length=100, required=False, label='Student Name')\n length = forms.TypedChoiceField(widget=forms.RadioSelect,\n choices = LENGTH_CHOICES, coerce=int, required=False, label='Prefered course duration')\n max_price = forms.IntegerField(label='Maximum Price', min_value=0)\n\nclass OrderForm(forms.ModelForm):\n class Meta:\n model = Order\n fields = ['courses', 'Student', 'order_status']\n widgets = {'courses': forms.CheckboxSelectMultiple(), 'order_type':forms.RadioSelect}\n labels = {'Student': u'Student Name', }\n\nclass ReviewForm(forms.ModelForm):\n class Meta:\n model = Review\n fields = ['reviewer','course','rating','comments']\n widgets = {'course': forms.RadioSelect}\n labels = {'reviewer': u'Please enter a valid email', 'rating':u'Rating An Integer between 1(worst) and 5(best)'}\n# def clean_rating(self):\n# data = self.cleaned_data.get('rating')\n# if not is_rating_valid(data):\n# raise forms.ValidationError('Ratings must be between 1 to 5')\n# return data\n#\n# def is_rating_valid(rating):\n# return 5 >= rating >= 1","repo_name":"guptaricha913/Django-E-Learning-WebApp","sub_path":"myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"16485412856","text":"from django.db.models import Case, Value, When, CharField\nfrom django.shortcuts import render\nfrom django.db.models.functions import Concat\n\nfrom constants.anonymous_profiles import RESERVED_ANON_NAME\nfrom thoughts.models import Thought\n\ndef index(request):\n current_user = request.user\n all_thoughts = {}\n\n if current_user.is_authenticated:\n all_thoughts = Thought.objects.exclude(status__in = ['2', '3']).annotate(\n username=Case(\n When(anonymous=True, then=Concat(Value('Anonymous '), Value(RESERVED_ANON_NAME))),\n When(anonymous=False, then='author__username'),\n output_field=CharField()\n )).annotate(\n can_delete=Case(\n When(author=current_user, then=Value('True')),\n default=Value('False'),\n output_field=CharField()\n )\n ).values('id', 'username', 'content', 'date_time', 'can_delete').order_by('-date_time')\n\n\n return render(request, 'landing_page/index.html', {'thoughts': all_thoughts})\n\ndef error(request):\n return render(request, 'landing_page/error.html')","repo_name":"tushhr/forum","sub_path":"landing_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"31215691459","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nfrom .as350 import AS350View\nfrom .bell206 import BELL206View\nfrom .bell407 import BELL407View\nfrom .bell429 import BELL429View\nfrom .da40d import DA40DView\nfrom .r22 import R22View\nfrom .r44 import R44View\nfrom .swz269c1 import SWZ269C1View\nfrom .y5b import Y5BView\n\n\nAS350_NAME = 'AS350'\nBELL206_NAME = 'BELL206'\nBELL407_NAME = 'BELL407'\nBELL429_NAME = 'BELL429'\nR22_NAME = 'R22'\nR44_NAME = 'R44'\nDA40D_NAME = 'DA40D'\nSWZ269C1_NAME = 'SWZ269c1'\nY5B_NAME = 'Y5B'\n\n\ndef init_mxp_view(admin_obj, mongodb, category, allowed_mxps=[]):\n # WUJG: 暂时不为各方案提供任何图标\n\n # 笨办法初始化允许的维修方案内容\n for mxp_name in allowed_mxps:\n mxp_name = mxp_name.lower()\n if mxp_name == 'as350':\n admin_obj.add_views(AS350View(\n mongodb, 'as350', AS350_NAME, category=category))\n elif mxp_name == 'bell206':\n admin_obj.add_views(BELL206View(\n mongodb, 'bell206', BELL206_NAME, category=category))\n elif mxp_name == 'bell407':\n admin_obj.add_views(BELL407View(\n mongodb, 'bell407', BELL407_NAME, category=category))\n elif mxp_name == 'bell429':\n admin_obj.add_views(BELL429View(\n mongodb, 'bell429', BELL429_NAME, category=category))\n elif mxp_name == 'da40d':\n admin_obj.add_views(DA40DView(\n mongodb, 'da40d', DA40D_NAME, category=category))\n elif mxp_name == 'r22':\n admin_obj.add_views(R22View(mongodb, 'r22', R22_NAME, category=category))\n elif mxp_name == 'r44':\n admin_obj.add_views(R44View(mongodb, 'r44', R44_NAME, category=category))\n elif mxp_name == 'swz269c1':\n admin_obj.add_views(SWZ269C1View(\n mongodb, 'swz269c1', SWZ269C1_NAME, category=category))\n elif mxp_name == 'y5b':\n admin_obj.add_views(Y5BView(mongodb, 'y5b', Y5B_NAME, category=category))\n","repo_name":"GSIL-Monitor/wxk","sub_path":"modules/views/mxp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"1969973694","text":"config = {\n \"base_file_path\": \"./DHIS2 report\",\n # tracker=DHIS2 Reporting Summary, full=DHIS2 Full Report\n \"report_generation\": \"tracker\",\n \"tracker_file_name\": \"DHIS2 Reporting Summary\",\n \"full_report_file_name\": \"DHIS2 Full Report\",\n \"data_elements_file_name\": \"data_elements.csv\",\n \"org_units_file_name\": \"org_units.csv\",\n \"category_option_combinations\": \"category_option_combinations.csv\",\n \"report_due_day\": 15,\n \"endpoints\": [\n {\n \"base\": \"\",\n \"username\": \"\",\n \"password\": \"\",\n \"report_file_name\": \"reports.csv\",\n \"use_start_date_in_request\": True,\n \"use_end_date_in_request\": True,\n \"default_start_date\": \"2018-01-01\",\n \"default_end_date\": \"2023-06-30\",\n },\n ],\n}\n","repo_name":"MichaelHarawa/dhis2-report-tracker","sub_path":"pihmalawi_config.py","file_name":"pihmalawi_config.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"}
+{"seq_id":"35781939370","text":"#!/bin/env python\n# -*- coding: utf-8 -*\n\"\"\"\n Author: (Guannan Ma)\n\"\"\"\n\nimport os\nimport sys\nimport pdb\nimport socket\n\n_NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'\nsys.path.insert(0, _NOW_PATH + '../')\n\nfrom cup import net\nfrom cup import unittest\n\n\ndef test_port_free():\n \"\"\"test port_listened\"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('0.0.0.0', 61113))\n sock.settimeout(1)\n net.set_sock_reusable(sock)\n sock.listen(1)\n ret = net.localport_free(61113)\n unittest.assert_eq(ret, False)\n unittest.assert_eq(\n net.port_listened(net.get_local_hostname(), 61113),\n True\n )\n sock.close()\n\ntest_port_free()\n# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent\n","repo_name":"baidu/CUP","sub_path":"cup_test/cup_net_init_test.py","file_name":"cup_net_init_test.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":925,"dataset":"github-code","pt":"27"}
+{"seq_id":"34307892187","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 28 16:54:01 2019\r\n\r\n@author: mstambou\r\n\r\nscript in which it will take leaf nodes to taxa assignment dictionary and will create an\r\niTOL importable file where it assigns all nodes (leaves and internal ones) to the most \r\nsepcific taxonomical assignment possible also it will take a dictionary of node name to number \r\nof peptides mapped to it and will display that in the label as well.\r\n\r\nto run this script you need to specify 6 command line arguments:\r\nthe first argument is the tree input file in newick format\r\nsecond argument is the Bin2taxon dictionary mapping file which is produced by 'bins2taxonomic_assignment_GTDBTK.py' script\r\nthe third argument is all nodes 2 taxonomic dictionary mapping between all the nodes to taxonomies produced by 'nodes2LCA_maps.py'\r\nthe fourth argument is a dictionary mapping between all the genomes 2 unipept peptides dictionary produced by 'peptide2genomeMapping2LCA_stringMatches.py'\r\nthe fifth argument is a dictionary mapping between all the peptides to all genomes dictionary produced by 'peptide2genomeMapping2LCA_stringMatches.py'\r\nthe sixth argument is the output directory for specifying where to store the files generated by this script\r\n\r\n\"\"\"\r\n\r\nfrom ete3 import Tree\r\nimport json\r\nimport sys\r\n\r\n\r\n#tree_in_f = 'allBins_Archaea_61RibosomalGTP_EFTU_internalNodesNamed_pruned_rooted.outtree'\r\n#Bin2taxon_clean_dic_f = '../bins2taxonomic_assignment_gtdbtk/allBin2taxon_dic_mergedABC_withARCHAEA_dic.json'\r\n\r\n#allNodes2taxon_dic_f = '../allBinsWithArchaea_tree_averaging_annotation_GTDBTK/allNodes2taxon_dic.json'\r\n\r\n#allGenomes2unipeptS7Peptides_dic_f = 'allGenomes2unipeptS7Peptides_dic.json'\r\n#unipeptS7Peptides2allGenomes_dic_f = 'unipeptS7Peptides2allGenomes_dic.json'\r\n\r\n#out_dir = tree_in_f.split('/', 1)[0]+'/'\r\n\r\nif len(sys.argv) != 7:\r\n print('please enter 6 command line arguments to run this script. example to run script i.e. 
\\n python3 make_iTOL_node_mostSpecificTaxaAssignment_peptideMapCounts.py dir/to/tree/file Bin2taxon_clean_dic_f allNodes2taxon_dic_f allGenomes2unipeptS7Peptides_dic_f unipeptS7Peptides2allGenomes_dic_f out_dir')\r\n\r\nelse:\r\n tree_in_f = sys.argv[1]\r\n Bin2taxon_clean_dic_f = sys.argv[2]\r\n allNodes2taxon_dic_f = sys.argv[3]\r\n allGenomes2unipeptS7Peptides_dic_f = sys.argv[4]\r\n unipeptS7Peptides2allGenomes_dic_f = sys.argv[5]\r\n out_dir = sys.argv[6]\r\n \r\n sample_name = 'allBins'\r\n \r\n tree = Tree(tree_in_f, format = 1)\r\n \r\n with open(Bin2taxon_clean_dic_f) as in_f:\r\n Bin2taxon_clean_dic = json.load(in_f)\r\n \r\n with open(allNodes2taxon_dic_f, 'r') as in_f:\r\n allNodes2taxa_dic = json.load(in_f)\r\n \r\n with open(allGenomes2unipeptS7Peptides_dic_f, 'r') as in_f:\r\n allGenomes2unipeptS7Peptides_dic = json.load(in_f)\r\n \r\n with open(unipeptS7Peptides2allGenomes_dic_f, 'r') as in_f:\r\n unipeptS7Peptides2allGenomes_dic = json.load(in_f)\r\n \r\n total_n_peptides = len(unipeptS7Peptides2allGenomes_dic)\r\n \r\n allGenomes2unipeptS7PeptideCounts_dic = {k:len(set(v)) for k,v in allGenomes2unipeptS7Peptides_dic.items()}\r\n \r\n leaf_names = tree.get_leaf_names() \r\n all_nodes = list()\r\n \r\n for node in tree.iter_prepostorder():\r\n all_nodes.append(node)\r\n \r\n def getNodeName2NodeMap_dic(tree):\r\n \"\"\"\r\n simple function that will return a node name to node mapping\r\n \"\"\"\r\n nodeName2Node_dic = dict()\r\n for node in tree.iter_prepostorder():\r\n node = node[1]\r\n nodeName2Node_dic[node.name] = node\r\n return nodeName2Node_dic\r\n \r\n tree.get_tree_root().name = 'OROOT'\r\n \r\n nodeName2Node_dic = getNodeName2NodeMap_dic(tree)\r\n \r\n nodeName2NpeptidesMapped_dic = dict()\r\n leaf_names = tree.get_leaf_names()\r\n not_found = list()\r\n \r\n for nodeName in nodeName2Node_dic:\r\n if nodeName in leaf_names:\r\n if nodeName not in allGenomes2unipeptS7Peptides_dic:\r\n not_found.append(nodeName)\r\n else: \r\n nodeName2NpeptidesMapped_dic[nodeName] = len(set(allGenomes2unipeptS7Peptides_dic[nodeName]))\r\n else:\r\n children = nodeName2Node_dic[nodeName].get_leaf_names()\r\n peptides = list()\r\n for child in children:\r\n if child not in allGenomes2unipeptS7Peptides_dic:\r\n not_found.append(child)\r\n else:\r\n peptides.extend(allGenomes2unipeptS7Peptides_dic[child])\r\n peptides = list(set(peptides))\r\n nodeName2NpeptidesMapped_dic[nodeName] = len(peptides)\r\n \r\n def getMostSpecificTaxon(taxon_dic):\r\n for char in 'sgfocpd':\r\n if char in taxon_dic:\r\n if (taxon_dic[char] != '') and ('GCA' not in taxon_dic[char]) and ('GCF' not in taxon_dic[char]):\r\n return char, taxon_dic[char]\r\n else:\r\n return 'NA', 'NA'\r\n \r\n \r\n with open(out_dir + 'nodeName2NpeptidesMapped_dic.json', 'w') as out_f:\r\n json.dump(nodeName2NpeptidesMapped_dic, out_f)\r\n \r\n #with open(out_dir + sample_name+'_labelsWithoutCounts.txt', 'w') as out_f:\r\n with open(out_dir + sample_name+'_labels.txt', 'w') as out_f:\r\n out_f.write('LABELS\\n')\r\n out_f.write('SEPARATOR TAB\\n')\r\n out_f.write('\\n')\r\n out_f.write('DATA\\n')\r\n for i, genome in enumerate(allNodes2taxa_dic): \r\n level, mostSpecificTaxon = getMostSpecificTaxon(allNodes2taxa_dic[genome])\r\n print(level, mostSpecificTaxon)\r\n #node = nodeName2Node_dic[genome]\r\n #n_leaves = len(node.get_leaves())\r\n n_peptides = 0\r\n if genome in nodeName2NpeptidesMapped_dic:\r\n n_peptides = nodeName2NpeptidesMapped_dic[genome]\r\n out_f.write(genome+'\\t'+mostSpecificTaxon+' 
('+str(n_peptides)+'/'+str(total_n_peptides)+')\\n')\r\n #out_f.write(genome+'\\t'+mostSpecificTaxon+'\\n')\r\n \r\n\r\n","repo_name":"mgtools/guttree","sub_path":"metaproteome_application/make_iTOL_node_mostSpecificTaxaAssignment_peptideMapCounts.py","file_name":"make_iTOL_node_mostSpecificTaxaAssignment_peptideMapCounts.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
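One detail of the script above is worth isolating: getMostSpecificTaxon walks rank letters from species ('s') down to domain ('d'), but its else clause makes it return 'NA' as soon as a single rank key is absent. A standalone version that checks every rank before giving up looks like this (same rank order, pure Python, no ete3 needed):

def most_specific_taxon(taxon_dic, ranks='sgfocpd'):
    # Return the first informative rank, skipping blanks and raw
    # GCA/GCF accession ids, as the script intends.
    for char in ranks:
        name = taxon_dic.get(char, '')
        if name and 'GCA' not in name and 'GCF' not in name:
            return char, name
    return 'NA', 'NA'

print(most_specific_taxon({'s': '', 'g': 'g__Bacteroides'}))  # ('g', 'g__Bacteroides')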
+{"seq_id":"30521200709","text":"import logging\nimport time\nimport os.path as op\nfrom threading import RLock\n\nfrom service.events_db import FileNotFound, FileInProcessing\nfrom common.utils import ensure_unicode\nfrom common.constants import STATUS_WAIT, FILE_LINK_SUFFIX\nfrom common.webserver_client import Client_APIError\nfrom service.shell_integration import params\nfrom common.file_path import FilePath\nfrom common.async_qt import qt_run\nfrom common.application import Application\nfrom common.translator import tr\nfrom service.events_db.file_events_db import FileEventsDBError\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nINCORRECT_PATH = 1\nNOT_IN_SYNC = 2\nINCORRECT_SERVER_RESPONSE = 4\nSAVE_TO_CLIPBOARD_FAILED = 8\n\nnode_synced = False\n\nlink_copied_to_clipboard = None\nshare_link_thread_id = 0\nshare_link_thread_active = 0\nshare_link_thread_lock = RLock()\n\n\n\nclass SharePathException(Exception):\n pass\n\n\ndef update_sync_status(status, substatus, l, r, fs, ee):\n global node_synced\n node_synced = status == STATUS_WAIT\n\n\ndef link_copy_success(success):\n global link_copied_to_clipboard\n link_copied_to_clipboard = success\n\n\ndef get_relpath(path):\n # Get sync directory path\n root = params.cfg.sync_directory\n if not root:\n logger.error(\"Sync directory is not set\")\n raise SharePathException()\n\n if path.endswith(FILE_LINK_SUFFIX) and not op.isdir(path):\n path = path[: -len(FILE_LINK_SUFFIX)]\n\n # Path is not in sync directory\n if not (FilePath(path) in FilePath(root)):\n logger.debug(\"Path '%s' is not in sync directory '%s'\", path, root)\n raise SharePathException()\n\n # Path is not exist\n if not op.exists(path) and not op.exists(path + FILE_LINK_SUFFIX):\n logger.warning(\"Path '%s' is not exist\", path)\n raise SharePathException()\n\n # Name of the file relative to the root directory\n return root, op.relpath(FilePath(path), FilePath(root))\n\n\n@qt_run\ndef share_paths(paths, link_ready_cb, save_to_clipboard=False,\n context='', move=False):\n \"\"\"\n Shares given paths via API\n\n @param paths Paths to be shared [list]\n @param link_ready_cb Callback to be called on links ready or on error\n [callable]\n @param save_to_clipboard Whether links are to be saved to clipboard [bool]\n @param context Context to be return in message if any [str]\n @param move Type of share message recieved (move or copy) [bool]\n @return None\n \"\"\"\n def process_error(error, error_info=''):\n msg = {\n INCORRECT_PATH:\n \"Failed to share '%s'. Incorrect path\",\n NOT_IN_SYNC:\n \"Path for share not in sync '%s'\",\n INCORRECT_SERVER_RESPONSE:\n \"Failed to share '%s'. 
Incorrect server response\",\n SAVE_TO_CLIPBOARD_FAILED:\n \"Failed to save share link '%s' to clipboard\",\n }\n logger.error(msg[error], path)\n if params.tracker:\n tracker_errors = {\n INCORRECT_PATH: params.tracker.INCORRECT_PATH,\n NOT_IN_SYNC: params.tracker.NOT_IN_SYNC,\n INCORRECT_SERVER_RESPONSE:\n params.tracker.INCORRECT_SERVER_RESPONSE,\n SAVE_TO_CLIPBOARD_FAILED:\n params.tracker.INTERNAL_ERROR,\n }\n params.tracker.share_error(\n 0,\n tracker_errors[error],\n time.time() - start_time)\n if callable(link_ready_cb):\n link_ready_cb(paths, None, error_info)\n\n start_time = time.time()\n global share_link_thread_id\n global share_link_thread_active\n global link_copied_to_clipboard\n with share_link_thread_lock:\n share_link_thread_id += 1\n thread_id = share_link_thread_id\n share_link_thread_active = thread_id\n\n # Share without expire\n share_ttl = 0\n num_tries = 5\n num_save_to_clipboard_tries = 5\n timeout = 10 * 60 # seconds\n message_timeout = 2 # seconds\n\n step = 0\n share_links = []\n result_paths = []\n\n for path in paths:\n path = ensure_unicode(path)\n try:\n # Name of the file relative to the root directory\n root, rel_path = get_relpath(path)\n except SharePathException:\n process_error(INCORRECT_PATH)\n return\n logger.info(\"Sharing path '%s'...\", rel_path)\n\n share_link = None\n\n while True:\n # Wait if file not in db yet\n try:\n if op.isfile(path) or op.isfile(path + FILE_LINK_SUFFIX):\n is_file = True\n uuid = params.sync.get_file_uuid(rel_path)\n elif op.isdir(path):\n is_file = False\n uuid = params.sync.get_folder_uuid(rel_path)\n else:\n process_error(INCORRECT_PATH)\n return\n except (FileNotFound, FileInProcessing, FileEventsDBError):\n uuid = None\n\n if uuid or (time.time() - start_time > timeout and node_synced):\n break\n\n if step == message_timeout:\n filename = op.basename(rel_path)\n Application.show_tray_notification(\n tr(\"Prepare to copy URL(s) for downloading to clipboard.\\n\"\n \"URL(s) will be copied after {} synced\").format(\n filename),\n tr(\"Sharing\"))\n\n step += 1\n time.sleep(1)\n\n if not uuid:\n process_error(NOT_IN_SYNC)\n return\n\n error_info = ''\n existing_share = params.ss_client.get_sharing_info()\n if uuid in existing_share:\n share_link = existing_share[uuid].get('share_link')\n logger.debug(\"Link for %s already exists: %s\", path, share_link)\n else:\n # Register sharing enabling on API server\n for i in range(num_tries):\n # wait if file not registered yet\n try:\n share_link, share_hash, error_info = params.web_api.sharing_enable(\n uuid, share_ttl)\n except Client_APIError:\n pass\n\n if share_link:\n break\n\n time.sleep(1)\n\n if not share_link:\n process_error(INCORRECT_SERVER_RESPONSE, error_info)\n return\n share_links.append(share_link)\n result_paths.append(FilePath(path).shortpath)\n\n if save_to_clipboard:\n share_link = '\\r\\n'.join(share_links)\n with share_link_thread_lock:\n link_copied_to_clipboard = False\n tries = 0\n while tries < num_save_to_clipboard_tries:\n with share_link_thread_lock:\n if thread_id != share_link_thread_active:\n return\n\n if link_copied_to_clipboard is not None:\n link_copied_to_clipboard = None\n tries += 1\n # Copy URL to clipboard (if any)\n Application.save_to_clipboard(share_link)\n time.sleep(0.1)\n\n if link_copied_to_clipboard:\n break\n else:\n process_error(SAVE_TO_CLIPBOARD_FAILED)\n return\n\n if params.tracker:\n pass\n# todo fix me\n# params.tracker.share_add(\n# is_file,\n# uuid, share_link,\n# time.time() - start_time)\n\n if 
callable(link_ready_cb):\n link_ready_cb(\n result_paths, share_links, save_to_clipboard=save_to_clipboard,\n context=context, move=move)\n\n\ndef cancel_sharing(paths):\n '''\n Cancels sharing of paths given via API\n\n @param path Path to be shared [unicode]\n @return Operation success flag [bool]\n '''\n\n # Name of the file relative to the root directory\n success = False\n for path in paths:\n try:\n _, rel_path = get_relpath(path)\n except SharePathException:\n if params.tracker:\n params.tracker.share_cancel(0, False)\n continue\n\n logger.info(\"Cancelling sharing path '%s'...\", rel_path)\n\n sharing_info = params.ss_client.get_sharing_info()\n\n # Given path is a file inside sync directory\n if op.isfile(path) or op.isfile(path + FILE_LINK_SUFFIX):\n uuid = params.sync.get_file_uuid(rel_path)\n elif op.isdir(path):\n uuid = params.sync.get_folder_uuid(rel_path)\n else:\n if params.tracker:\n params.tracker.share_cancel(0, False)\n continue\n\n # Check that UUID is known as shared\n if not uuid or uuid not in sharing_info:\n logger.error(\"No share for path '%s'\", rel_path)\n if params.tracker:\n params.tracker.share_cancel(0, False)\n continue\n\n # Register sharing disabling on API server\n try:\n params.web_api.sharing_disable(uuid)\n except Client_APIError:\n logger.error(\n \"API request for cancel sharing of '%s' failed\", rel_path)\n if params.tracker:\n params.tracker.share_cancel(uuid, False)\n continue\n\n if params.tracker:\n params.tracker.share_cancel(uuid, True)\n success = True\n\n return success\n\n\ndef is_paths_shared(paths):\n for path in paths:\n is_file = op.isfile(path)\n try:\n _, rel_path = get_relpath(path)\n except SharePathException:\n continue\n logger.debug(\"Checking sharing path '%s'...\", rel_path)\n if params.sync.is_path_shared(rel_path, is_file):\n return True\n\n return False\n\n\ndef is_folder(path):\n return op.isdir(path)\n","repo_name":"pvtbox/pvtbox-desktop","sub_path":"service/shell_integration/share_path.py","file_name":"share_path.py","file_ext":"py","file_size_in_byte":9788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
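share_paths above interleaves three patterns: polling until the events db yields a uuid, bounded retries against the API, and a generation counter (share_link_thread_id) so a newer share request cancels an older clipboard loop. The polling core, reduced to a standalone sketch with the same timeout semantics:

import time

def poll_until(fn, timeout=600, interval=1):
    # Call fn() until it returns a truthy value or the deadline passes;
    # mirrors the uuid wait loop in share_paths above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = fn()
        if result:
            return result
        time.sleep(interval)
    return None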
+{"seq_id":"33941691543","text":"import numpy as np\nfrom itertools import combinations\nimport csv\nimport sys\n\nimport math\nfrom scipy.stats import norm\nimport random\nimport pandas as pd\n\n\ndef stock_calculator(sp_index, dataframe):\n # #print(\"Reading sp_index file into pandas Dataframe\")\n # df_spindex = pd.read_csv(sp_index)\n\n # #print(\"Reading stockprice file into dataframe\")\n # df = pd.read_csv(csvfile)\n df_spindex = sp_index\n df = dataframe\n df.insert(6, \"closeNew\", df_spindex[\"close\"])\n df = df[[\"close\", \"closeNew\"]]\n df.columns = [\"close_stocks\", \"close_benchmark\"]\n\n #print(\"Raveling columns\")\n close_stocks_values = df.close_stocks.values\n close_benchmark_values = df.close_benchmark.values\n\n close_stocks_values = close_stocks_values[::-1]\n close_benchmark_values = close_benchmark_values[::-1]\n\n # #print(\" Max Benchmark Values\", max(close_benchmark_values))\n # #print(\" Min Benchmark Values\", min(close_benchmark_values))\n\n # #print(close_stocks_values[:100])\n\n # #print(\"Calculating mean and variance of stock prices\")\n # # Variance and mean calculations\n # # stock_var = np.var(close_stocks_values)\n # stock_mean = sum(list(close_stocks_values)) / len(close_stocks_values)\n # stock_var = sum([(i-stock_mean)**2 for i in close_stocks_values])/(len(close_stocks_values)-1)\n #\n # #print(\"Calculating mean and variance of benchmark index\")\n # # benchmark_var = np.var(close_benchmark_values)\n # benchmark_mean = sum(list(close_benchmark_values)) / len(close_benchmark_values)\n # benchmark_var = sum([(i-benchmark_mean)**2 for i in close_benchmark_values])/(len(close_benchmark_values)-1)\n\n\n close_stocks_change = []\n close_benchmark_change = []\n\n #print(\"Calculating % change in stock prices\")\n for i in range(1, len(close_stocks_values)):\n close_stocks_change.append(\n (close_stocks_values[i] - close_stocks_values[i - 1]) / close_stocks_values[i - 1])\n\n #print(\"Calculating % change in benchmark values\")\n for i in range(1, len(close_benchmark_values)):\n close_benchmark_change.append(\n (close_benchmark_values[i] - close_benchmark_values[i - 1]) / close_benchmark_values[i - 1])\n\n close_stocks_change = np.array(close_stocks_change)\n\n close_benchmark_change = np.array(close_benchmark_change)\n\n #print(\"Calculating mean and variance of close stock prices\")\n # Variance and mean calculations\n stock_var = np.var(close_stocks_change)\n stock_mean = sum(list(close_stocks_change)) / len(close_stocks_change)\n # stock_var = sum([(i - stock_mean) ** 2 for i in close_stocks_values]) / (len(close_stocks_values) - 1)\n\n #print(\"Calculating mean and variance of close benchmark index\")\n benchmark_var = np.var(close_benchmark_change)\n benchmark_mean = sum(list(close_benchmark_change)) / len(close_benchmark_change)\n # benchmark_var = sum([(i - benchmark_mean) ** 2 for i in close_benchmark_values]) / (len(close_benchmark_values) - 1)\n\n\n df_change = pd.DataFrame(close_stocks_change)\n # #print(df_change.head())\n\n df_change.insert(1, \"change_benchmark\", close_benchmark_change)\n\n df_change.columns = [\"%change_stocks\", \"%change_benchmark\"]\n\n # #print(\"Writing % changes to CSV\")\n # df_change.to_csv(csvfile.split(\".\")[0]+ \"_percent_change\" + \".csv\", index=False)\n\n # #print(\"Calculation on {} complete\".format(csvfile))\n return df_change, stock_var, stock_mean, benchmark_var, benchmark_mean\n\n# Function returns Sharpes ratio, Alpha, Beta portfolio, standard deviation\n# [(df,ticker,quan)...(spy_df)]\ndef 
get_risk_indicators(list_of_stock_files):\n beta_mean_variance_and_quantity_values = {}\n benchmark_mean = 0\n stocks_change_values = {}\n spindex_file = list_of_stock_files[-1][0]\n stocks = [i[1] for i in list_of_stock_files[:-1]]\n\n\n for dataframe, ticker, quantity in list_of_stock_files[:-1]:\n df, stock_var, stock_mean, benchmark_var, benchmark_mean = stock_calculator(spindex_file, dataframe)\n\n #print(\"Benchmark variance\",benchmark_var)\n\n # df = pd.read_csv(file.split(\".\")[0] + \"_percent_change.csv\")\n\n stocks_values = df[\"%change_stocks\"].values\n stocks_change_values[ticker] = stocks_values\n benchmark_values = df[\"%change_benchmark\"].values\n cov = np.cov(stocks_values, benchmark_values)\n covariance = cov[0][1]\n\n #print(\"Stock mean\",stock_mean)\n #print(\"Benchmark mean\", benchmark_mean)\n\n # Beta calculation\n #print(\"Beta calculation\")\n beta = covariance/benchmark_var\n # #print(\"BETA for {}\".format(file), beta)\n beta_mean_variance_and_quantity_values[ticker] = (beta, stock_mean, stock_var, quantity)\n\n\n RISK_FREE_RATE = 0.0126/365\n # #print(beta_mean_and_variance_values)\n\n\n total_capital = sum([i[1]*i[3] for i in beta_mean_variance_and_quantity_values.values()])\n\n beta_portfolio = sum([i[0]*((i[3]*i[1])/total_capital) for i in beta_mean_variance_and_quantity_values.values()])\n #print(beta_portfolio)\n\n portfolio_mean = sum([i[1]*((i[3]*i[1])/total_capital) for i in beta_mean_variance_and_quantity_values.values()])\n\n # Alpha calculation\n alpha = portfolio_mean-RISK_FREE_RATE-(beta_portfolio*(benchmark_mean-RISK_FREE_RATE))\n #print(\"ALPHA\", alpha)\n\n weight_array = [(i[3]*i[1])/total_capital for i in beta_mean_variance_and_quantity_values.values()]\n\n dict_of_weights = {}\n\n for i in stocks:\n dict_of_weights[i] = weight_array[0]\n\n # Variance of portfolio\n\n combinations_of_two = list(combinations(stocks, 2))\n # #print(\"STOCKS ARRAY\", stocks_change_values)\n #print(combinations_of_two)\n portfolio_of_variance = 0\n for i in combinations_of_two:\n size1 = stocks_change_values[i[0]].size\n size2 = stocks_change_values[i[1]].size\n size = min(size1, size2)\n\n cov = np.cov(stocks_change_values[i[0]][:size], stocks_change_values[i[1]][:size])\n covariance = cov[0][1]\n #print(\"COVARIANCE:\", cov)\n current_pV = dict_of_weights[i[0]]**2 * beta_mean_variance_and_quantity_values[i[0]][2] + dict_of_weights[i[1]]**2 * beta_mean_variance_and_quantity_values[i[1]][2] \\\n + 2 * covariance * dict_of_weights[i[0]] * dict_of_weights[i[1]]\n portfolio_of_variance += current_pV\n\n #print(\"standard deviation of portfolio\", math.sqrt(portfolio_of_variance))\n\n\n # Sharpes ratio\n if portfolio_of_variance==0.0:\n sharpes_ratio=1.0\n else:\n sharpes_ratio = (portfolio_mean - RISK_FREE_RATE)/math.sqrt(portfolio_of_variance)\n # sharpes_ratio += 0.5\n\n standard_deviation = math.sqrt(portfolio_of_variance)\n\n #print(sharpes_ratio)\n return sharpes_ratio, alpha, beta_portfolio, standard_deviation\n","repo_name":"varunkm/baml-hack-webapp","sub_path":"stockhelper/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
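The statistics in calculations.py reduce to a handful of numpy one-liners. A self-contained illustration on synthetic returns (all numbers fabricated; 0.0126/365 is the daily risk-free constant used above, and covariance/variance is the same beta formula as in the script):

import numpy as np

rng = np.random.default_rng(0)
benchmark = rng.normal(0.0003, 0.01, 250)            # fake benchmark returns
stock = 1.2 * benchmark + rng.normal(0, 0.005, 250)  # fake stock returns

beta = np.cov(stock, benchmark)[0][1] / np.var(benchmark)
risk_free = 0.0126 / 365
sharpe = (stock.mean() - risk_free) / stock.std()
print(round(beta, 2), round(sharpe, 3))  # beta should land near 1.2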
+{"seq_id":"15255426392","text":"import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.stats as stats\r\n\r\ndef label_point(x, y, val, ax):\r\n a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)\r\n for i, point in a.iterrows():\r\n if i == 7:\r\n ax.text(point['x']-.04, point['y'], str(point['val']),horizontalalignment='right',size= 5)\r\n elif i == 10:\r\n ax.text(point['x']+.04, point['y'], str(point['val']),horizontalalignment='left',size= 5)\r\n elif i == 9:\r\n ax.text(point['x'], point['y']+.1, str(point['val']),horizontalalignment='center',size= 5)\r\n elif i == 4:\r\n ax.text(point['x']+.1, point['y']-.16, str(point['val']),horizontalalignment='right',size= 5)\r\n elif i == 14:\r\n ax.text(point['x']-.04, point['y']-.08, str(point['val']),horizontalalignment='right',size= 5)\r\n elif i == 15:\r\n ax.text(point['x'], point['y']-.04, str(point['val']),horizontalalignment='left',size= 5)\r\n elif i == 2:\r\n ax.text(point['x']-.04, point['y']-.1, str(point['val']),horizontalalignment='right',size= 5)\r\n elif i == 0:\r\n ax.text(point['x']-.15, point['y']+.1, str(point['val']),horizontalalignment='center',size= 5)\r\n elif point['x'] <= 2.5:\r\n ax.text(point['x']+.04, point['y'], str(point['val']),size= 5)\r\n else:\r\n ax.text(point['x']-.04, point['y'], str(point['val']),horizontalalignment='right',size= 5)\r\n\r\n\r\ndf = pd.read_table('NOAV/NOAV.txt',sep='\\t',header='infer',encoding='latin1')\r\ndf = df[pd.notnull(df['NOAVIssueDate'])]\r\nyear = []\r\nprint(df.columns)\r\nfor i in df['NOAVIssueDate']:\r\n mth, day, x = i.split('/')\r\n try:\r\n yr, tm = x.split(\" \")\r\n year.extend([int(yr)])\r\n except:\r\n yr = int(x)\r\n year.extend([yr])\r\ndf['ViolationYear'] = year\r\n\r\n# # ******** Portion of the code for penalties assessed\r\n# penalty = df[['ViolationYear','OperatorName','NOAVIssueDate','FinalResolutionComments']]\r\n# penalty = penalty[(penalty['FinalResolutionComments'].str.contains('$')) |\r\n# (penalty['FinalResolutionComments'].str.contains('fine',case=False))]\r\n#\r\n# this = penalty['FinalResolutionComments'].str.contains('no fine',case=False)\r\n# penalty = penalty[np.logical_not(this)]\r\n#\r\n# penalty.to_csv('NOAV/fined.csv')\r\n#\r\n# yrs = list(df['ViolationYear'].drop_duplicates())\r\n#\r\n# percent = []\r\n# for i in yrs:\r\n# tot = len(df[df['ViolationYear'] == i])\r\n# pen = len(penalty[penalty['ViolationYear'] == i])\r\n# per = (pen/tot)*100\r\n# percent.extend([per])\r\n#\r\n# sns.barplot(yrs,percent)\r\n# plt.xlabel('Year')\r\n# plt.ylabel('Percent Fined')\r\n# plt.title('Percent of NOAVs Resulting in Fines')\r\n# plt.savefig('fig1.png')\r\n# plt.show()\r\n# plt.close()\r\n#\r\n# operators = list(penalty['OperatorName'].drop_duplicates())\r\n# violators = []\r\n# for i in operators:\r\n# tot = len(penalty[penalty['OperatorName'] == i])\r\n# violators.extend([tot])\r\n# new = pd.DataFrame(data=[violators,operators])\r\n# new = new.T\r\n# new.columns = ['Vi','Op']\r\n# new = new[new['Vi'] >= 10]\r\n# sns.barplot(x='Vi',y='Op',data=new)\r\n# plt.ylabel('Operator')\r\n# plt.yticks(size=6)\r\n# plt.xlabel('Number Violations')\r\n# plt.title('Violations Resulting in Fines by Operator')\r\n# plt.tight_layout()\r\n# plt.savefig('fig2.png')\r\n# plt.show()\r\n# plt.close()\r\n# # **********\r\n\r\n# # *********** Penalties not assessed\r\n# yrs = list(df['ViolationYear'].drop_duplicates())\r\n# operators = df.drop_duplicates(subset=['OperatorName','OperatorNumber'])\r\n# operators = 
operators[['OperatorNumber','OperatorName']]\r\n# data = pd.read_csv('Production_Summaries/Colorado Production 2011-2017.txt',sep='\\t',header='infer',encoding=None)\r\n# data = data.drop(labels=data.columns.tolist()[0], axis=1)\r\n# num = operators['OperatorNumber'].drop_duplicates()\r\n# number_NOAV = []\r\n# for i in num:\r\n# total = len(df[df['OperatorNumber'] == i])\r\n# number_NOAV.extend([total])\r\n#\r\n# new = pd.DataFrame([list(operators['OperatorName']),list(operators['OperatorNumber']),number_NOAV])\r\n# new = new.T\r\n# new.columns=['Operator Name','Operator Number','NOAV Count']\r\n# for i in list(range(2011,2018,1)):\r\n# column = str(i)\r\n# new[column] = [np.nan]*len(new)\r\n#\r\n# data['report_year'] = list(map(int, list(data['report_year'])))\r\n# this = data['operator_num'].drop_duplicates()\r\n# check = this[this.isin(list(operators['OperatorNumber']))]\r\n# print(len(this))\r\n# print(len(check))\r\n# print(len(new))\r\n# for i in list(range(2011,2018,1)):\r\n# year_ct = []\r\n# column = str(i)\r\n# for j in list(new['Operator Number']):\r\n# dum = data[(data['operator_num'] == j) & (data['report_year'] == i)]\r\n# co = len(dum[(pd.notnull(dum['oil_prod'])) & (pd.notnull(dum['gas_prod']))])\r\n# year_ct.extend([co])\r\n# print(i)\r\n# new[column] = year_ct\r\n\r\n# # *********** New stuff\r\nsns.set_style('darkgrid')\r\nnew = pd.read_csv('Production_Summaries/NOAV_activeWell_company.csv')\r\navgs = []\r\nfor i in list(new['Operator Number']):\r\n series = new[new['Operator Number'] == i]\r\n series = series[['2011','2012','2013','2014','2015','2016','2017']]\r\n series = series.T\r\n series = list(series[series.columns.tolist()[0]])\r\n a = np.average(series)\r\n avgs.extend([a])\r\nnew['Average Active Wells'] = np.nan*len(new)\r\nnew['Average Active Wells'] = avgs\r\nnew = new[(new['Average Active Wells'] > 0) & (new['NOAV Count'] > 0)]\r\nnew = new[(pd.notnull(new['Average Active Wells'])) & (pd.notnull(new['NOAV Count']))]\r\n# sns.regplot(x='Average Active Wells',y='NOAV Count',data=new,marker='+')\r\n# plt.semilogx()\r\n# plt.xlim(1,10000)\r\n# plt.savefig('NOAV preliminary.png')\r\n# plt.tight_layout()\r\n# plt.show()\r\n# plt.close()\r\n\r\nnew['NOAV per well'] = new['NOAV Count']/new['Average Active Wells']\r\nnew['Average Active Wells'] = np.log10(np.array(new['Average Active Wells']))\r\nnew['NOAV per well'] = np.log10(np.array(new['NOAV per well']))\r\nx = np.array(new['Average Active Wells'])\r\ny = np.array(new['NOAV per well'])\r\nsns.regplot(x='Average Active Wells',y='NOAV per well',data=new,marker='+')\r\nplt.ylim(-4,2)\r\nplt.yticks(np.arange(2,-5,-1),['100','10','1','.1','.01','.001','.0001'],rotation=30)\r\nplt.xlim(-1,4)\r\nplt.xticks(np.arange(-1,5,1),['.1','1','10','100','1000','10000'],rotation=30)\r\ns, inter, r, p ,std = stats.linregress(x,y)\r\nr = -r\r\nr = \"{0:.3g}\".format(r)\r\nstd = \"{0:.3g}\".format(std)\r\nrho, p = stats.spearmanr(x,y)\r\np = \"{0:.3g}\".format(p)\r\nrho = \"{0:.3g}\".format(rho)\r\n\r\nplt.text(-0.2,-2.5,'R^2: ' +str(r),horizontalalignment='left',size=10)\r\nplt.text(-0.2,-2.8,'Std. 
Err.: ' +str(std),horizontalalignment='left',size=10)\r\nplt.text(-0.2,-3.1,'P-Val: ' +str(p),horizontalalignment='left',size=10)\r\nplt.text(-0.2,-3.4,'Spearman Rank: ' +str(rho),horizontalalignment='left',size=10)\r\nplt.tight_layout()\r\nplt.savefig('NOAV as function of well.png')\r\nplt.show()\r\nplt.close()\r\n\r\nnew = new[(new['NOAV per well'] >= (1.3-new['Average Active Wells']))]\r\nnew['index'] = list(range(len(new)))\r\nnew = new.set_index('index')\r\nx = np.array(new['Average Active Wells'])\r\ny = np.array(new['NOAV per well'])\r\nex = [-2,5]\r\nwhy = [((s*-2)+inter),((s*5)+inter)]\r\nplt.plot(ex,why,'--k',alpha=0.5)\r\nplt.scatter(x,y)\r\nplt.ylim(-4,2)\r\nplt.yticks(np.arange(2,-5,-1),['100','10','1','.1','.01','.001','.0001'],rotation=30)\r\nplt.xlim(-1,4)\r\nplt.xticks(np.arange(-1,5,1),['.1','1','10','100','1000','10000'],rotation=30)\r\nlabel_point(new['Average Active Wells'],new['NOAV per well'],new['Operator Name'],plt.gca())\r\nplt.savefig('Labeled Companies Issues.png')\r\nplt.show()\r\nplt.close()\r\n\r\ncount = []\r\nyrs = list(range(2011,2019,1))\r\nfor i in yrs:\r\n total = len(df[df['ViolationYear'] == i])\r\n count.extend([total])\r\npdf = pd.DataFrame([yrs,count])\r\npdf = pdf.T\r\npdf.columns = ['Year','Number of NOAVs Issued']\r\nsns.barplot(x='Year',y='Number of NOAVs Issued',data=pdf)\r\nplt.title('Number of NOAVs per year')\r\nplt.tight_layout()\r\nplt.savefig('NOAVs per year.png')\r\nplt.show()\r\nplt.close()\r\n\r\n\r\n# 'OperatorNumber',OperatorName','NOAVIssueDate',\r\n# 'operator_num','name','report_year','oil_prod','gas_prod'","repo_name":"JohnStults/NOAV","sub_path":"NOAV_analysis.py","file_name":"NOAV_analysis.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
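The regression above fits log10(NOAVs per well) against log10(active wells) with scipy.stats.linregress and annotates the plot with the Spearman rank correlation. The fit itself, on fabricated power-law data, shows what the slope means on log-log axes:

import numpy as np
from scipy import stats

wells = np.array([2.0, 10.0, 50.0, 200.0, 1000.0])
noav_per_well = 5.0 / wells  # made-up inverse relation

slope, intercept, r, p, stderr = stats.linregress(np.log10(wells), np.log10(noav_per_well))
print(slope)  # ~ -1.0: NOAVs per well falls off inversely with fleet size here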
+{"seq_id":"24119299298","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nimport glob\nimport g#g.val1=seikaisuu g.val2=count_id g.val3=sec g.val4=第i\nimport pygame.mixer as pgm\nfrom time import sleep\nimport gc\n\n\ndef play():\n\tgc.collect()\n\tpgm.init()\n\tbgm=pgm.Sound('maou_game_jingle05.wav')\n\tbgm.set_volume(0.2)\n\tbgm.play(-1)\n\tse=pgm.Sound('maou_se_system49.wav')\n\tse.set_volume(0.3)\n\tbgm2=pgm.Sound('maou_bgm_8bit10.wav')\n\tbgm2.set_volume(0.2)\n\tse2=pgm.Sound('maou_se_8bit15.wav')\n\tse2.set_volume(0.3)\n\tbgm3=pgm.Sound('maou_game_jingle09.wav')\n\tbgm3.set_volume(0.2)\n\tse1_=pgm.Sound('クイズ正解1.wav')\n\tse2_=pgm.Sound('クイズ不正解1.wav')\n\tdef get_entry_text():\n\t\tbgm.stop()\n\t\t\n\t\t\n\t\tse.play(0)\n\t\tsleep(1)\n\t\tql=ql_m.curselection()\n\t\tF = open('quizedata\\{0}.txt'.format(ql_m.get(ql)), 'r', encoding='utf-8')\n\t\t\n\t\tqandf = F.readlines()\n\t\trootP.destroy()\n\t\t\n\t\tbgm2.play(-1)\n\t\tquize=[None]*5\n\t\tfact=[None]*5\n\t\t#\n\t\tfor i in range(5):\n\t\t\tquize[i] = qandf[i * 2]\n\t\t\tfact[i] = qandf[i * 2 + 1]\n\t\tg.val4=0\n\t\t\t\n\t\tdef game(f,quize,fact):#pray2 この中で5回\n\t\t\t#kはiみたいな \n\t\t\tg.val3=16\n\t\t\t\n\t\t\t\n\t\t\trootPP=tk.Tk()\n\t\t\trootPP.geometry(\"1000x750+200+100\")\n\t\t\trootPP.configure(bg=\"black\")\n\t\t\trootPP.grid_columnconfigure(1, weight=1)\n\t\t\trootPP.grid_columnconfigure(2, weight=1)\n\t\t\trootPP.grid_columnconfigure(3, weight=1)\n\t\t\tq_l=tk.Label(rootPP,text=quize[g.val4],font=(\"HGP創英角ポップ体\",\"40\",\"bold\"),wraplength=1000,width=1000,fg='white',bg='black',height=2,pady=30)\n\t\t\tq_l.grid(row=0,column=0,columnspan=5)\n\t\t\tdef co():#g.val3\n\t\t\t\t\n\t\t\t\tif g.val3>0:\n\t\t\t\t\tg.val3=g.val3-1\n\t\t\t\t\ttime.config(text=g.val3)\n\t\t\t\t\tg.val2=rootPP.after(1000,lambda:co())\n\t\t\t\t\n\t\t\t\telif g.val3==0:\n\t\t\t\t\tbutton.invoke()\n\t\t\t\t\n\t\t\t\tif g.val3==14:\n\t\t\t\t\tbutton['state']='normal'\n\t\t\t\t\n\t\t\tdef anser(quize,fact):\n\t\t\t\trootPP.after_cancel(g.val2)\n\t\t\t\tbutton['state']='disabled'\n\t\t\t\tse.play(0)\n\t\t\t\t\n\t\t\t\ta.set(yorn.get())\n\t\t\t\tif int(a.get())==0:\n\t\t\t\t\tai='はい\\n'\n\t\t\t\telif int(a.get())==1:\n\t\t\t\t\tai='いいえ\\n'\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tdef ne(TEXT,c):\n\t\t\t\t\ttime.config(text=TEXT)\n\t\t\t\t\tif g.val4<4:\n\t\t\t\t\t\tno.config(image=n)\n\t\t\t\t\t\tyes.config(image=y)\n\t\t\t\t\t\tg.val3=16\n\t\t\t\t\t\tg.val4=g.val4+1\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\tif c==0:\n\t\t\t\t\t\t\tdef setime():\n\t\t\t\t\t\t\t\tq_l.config(text=quize[g.val4])\n\n\t\t\t\t\t\t\t\tse2.play(0)\n\t\t\t\t\t\t\t\ttime.config(text='スライムがあらわれたのじゃ!')\n\t\t\t\t\t\t\trootPP.after(1000,lambda:setime())\n\t\t\t\t\t\t\trootPP.after(1200,lambda:s.config(image=s1))\n\t\t\t\t\t\t\trootPP.after(1400,lambda:s.config(image=s0))\n\t\t\t\t\t\t\trootPP.after(1600,lambda:s.config(image=s1))\n\t\t\t\t\t\t\trootPP.after(1800,lambda:s.config(image=s0))\n\t\t\t\t\t\t\trootPP.after(2000,lambda:s.config(image=s1))\n\t\t\t\t\t\t\trootPP.after(2000,lambda: co())\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tq_l.config(text=quize[g.val4])\n\t\t\t\t\t\t\trootPP.after(1000,lambda: co())\n\t\t\t\t\telse:\n\t\t\t\t\t\trootPP.destroy()\n\t\t\t\tif 
ai==fact[g.val4]:\n\t\t\t\t\t\n\t\t\t\t\tse1_.play(0)\n\t\t\t\t\ttime.config(text='せいかいなのじゃ!')\n\t\t\t\t\tg.val1=g.val1+1\n\t\t\t\t\t#1000-1500やられ\n\t\t\t\t\trootPP.after(1000,lambda:s.config(image=s2))\n\t\t\t\t\trootPP.after(1100,lambda:s.config(image=s1))\n\t\t\t\t\trootPP.after(1200,lambda:s.config(image=s2))\n\t\t\t\t\trootPP.after(1300,lambda:s.config(image=s1))\n\t\t\t\t\trootPP.after(1400,lambda:s.config(image=s2))\n\t\t\t\t\trootPP.after(1500,lambda:s.config(image=s0))\n\t\t\t\t\tTEXT='スライムをたおしたのじゃ!!'\n\t\t\t\t\trootPP.after(2000,lambda:ne(TEXT,0))\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tse2_.play(0)\n\t\t\t\t\ttime.config(text='ありゃ、ちがうようじゃ')\n\t\t\t\t\tTEXT='スライムがまだいるのじゃ'\n\t\t\t\t\trootPP.after(2000,lambda:ne(TEXT,1))\n\t\t\t#def anser(f,id_,quize(sorezoreno))\n\t\t\t\t#ボタン無効化\n\t\t\t\t#正誤判定 kekka resalut\n\t\t\t\t#g.val3=resalt (yn_lのてxt\n\t\t\t\t#3sec g.val3=15\n\t\t\t\t\n\t\t\t\t#ボタン有効か\n\t\t\tdef see(nam):\n\t\t\t\tse.play(0)\n\t\t\t\tif nam==0:\n\t\t\t\t\tyes.config(image=y2)\n\t\t\t\t\tno.config(image=n)\n\t\t\t\telif nam==1:\n\t\t\t\t\tno.config(image=n2)\n\t\t\t\t\tyes.config(image=y)\n\t\t\t#suraimusuraimu\n\t\t\ts0=tk.PhotoImage(file='s_0.png',master=rootPP)#なし\n\t\t\ts0=s0.subsample(3)\n\t\t\ts1=tk.PhotoImage(file='s_1.png',master=rootPP)#通常\n\t\t\ts1=s1.subsample(3)\n\t\t\ts2=tk.PhotoImage(file='s_2.png',master=rootPP)#赤\n\t\t\ts2=s2.subsample(3)\n\t\t\t\n\t\t\t###\n\t\t\t\n\t\t\ttime = tk.Label(rootPP,text='スライムがあらわれたのじゃ!',font=(\"HGP創英角ポップ体\",\"50\",\"bold\"),fg='red',bg='black')\n\t\t\ttime.grid(row=1, column=0,columnspan=5)\n\t\t\t\n\t\t\ts=tk.Label(rootPP,image=s0,relief='solid')\n\t\t\ts.grid(row=2,column=2)\n\t\t\t\n\t\t\t\n\t\t\ty=tk.PhotoImage(file='mark_yes_no_hai.png',master=rootPP)\n\t\t\ty=y.subsample(4)\n\t\t\tn=tk.PhotoImage(file='mark_yes_no_iie.png',master=rootPP)\n\t\t\tn=n.subsample(4)\n\t\t\ty2=tk.PhotoImage(file='mark_yes_no_hai2.png',master=rootPP)\n\t\t\ty2=y2.subsample(4)\n\t\t\tn2=tk.PhotoImage(file='mark_yes_no_iie2.png',master=rootPP)\n\t\t\tn2=n2.subsample(4)\n\t\t\t\n\t\t\ta=tk.IntVar()\n\t\t\tyorn=tk.IntVar()\n\t\t\tyes=tk.Radiobutton(rootPP,value=0,variable=yorn,image=y,indicatoron=\"False\",command=lambda:see(0))\n\t\t\tyes.grid(row=3,column=1)\n\t\t\t\n\t\t\tno=tk.Radiobutton(rootPP,value=1,variable=yorn,image=n,indicatoron=\"False\",command=lambda:see(1))\n\t\t\tno.grid(row=3,column=3)\n\t\t\tbutton=tk.Button(rootPP,text='けってい!!',font=(\"HGP創英角ポップ体\",\"25\",\"bold\"),fg='red',relief=\"solid\", command=lambda: anser(quize,fact))\n\t\t\tbutton['state']='disabled'\n\t\t\tbutton.grid(row=4, column=1,columnspan=3)\n\t\t\t\n\t\t\trootPP.after(200,lambda:s.config(image=s1))\n\t\t\trootPP.after(400,lambda:s.config(image=s0))\n\t\t\trootPP.after(600,lambda:s.config(image=s1))\n\t\t\trootPP.after(800,lambda:s.config(image=s0))\n\t\t\trootPP.after(1000,lambda:s.config(image=s1))\n\t\t\trootPP.after(1000,lambda:co())\n\t\t\trootPP.mainloop()\n\t\t\t###\n\t\t\t\n\t\tse2.play(0)\n\t\tgame(F,quize,fact)\n\t\t\t\n\t\t\t\n\t\t\t#kugiri\n\t\t\n\t\tbgm2.stop()\n\t\tbgm3.play(0)\n\t\trootE=tk.Tk()\n\t\trootE.configure(bg=\"black\")\n\t\trootE.title('おしまい')\n\t\t\n\t\tif g.val1==5:\n\t\t\t\n\t\t\tlabela=tk.Label(rootE,text='ぜんぶのスライムを\\nたおしたのじゃ!',font=(\"HGP創英角ポップ体\",\"50\",\"bold\"),bg='black',fg='white')\n\t\t\tg.val1=0\n\t\t\tI=tk.PhotoImage(file='perfect.png',master=rootE)\n\t\t\tlabela.grid(row=0,column=0)\n\t\t\tILUST=tk.Label(rootE,image=I,relief='solid')\n\t\t\tILUST.grid(row=1,column=0)\n\t\telse:\n\t\t\t\n\t\t\tlabela=tk.Label(rootE,text='{0}たいの 
スライムを\\nたおしたのじゃ'.format(g.val1),font=(\"HGP創英角ポップ体\",\"50\",\"bold\"),bg='black',fg='white')\n\t\t\tlabela.grid(row=0,column=0,columnspan=g.val1)\n\t\t\t#ばってん\n\t\t\tI=tk.PhotoImage(file='s_3.png',master=rootE)\n\t\t\tI=I.subsample(3)\n\t\t\tILUST=[None]*g.val1\n\t\t\tfor i in range(g.val1):\n\t\t\t\tILUST[i]=tk.Label(rootE,image=I,relief='solid')\n\t\t\t\tILUST[i].grid(row=1,column=i)\n\t\t\t\n\t\tg.val1=0\n\t\t\n\t\t\n\t\t\n\t\trootE.after(5000,lambda: rootE.destroy())\n\t\trootE.mainloop()\n\t\t\n\t\t\n\t\t\n\tg.val1=0\n\tL=glob.glob(\".\\quizedata\\*.txt\")\n\tqlist=[None]\n\tfor lists in L:\n\t\tlists=lists.removeprefix('.\\\\quizedata\\\\')\n\t\tlists=lists.removesuffix('.txt')\n\t\tqlist=qlist+[lists]\n\t\t\n\tdel qlist[0]\n\t\n\trootP=tk.Tk()\n\trootP.geometry(\"1180x750+100+50\")\n\trootP.configure(bg=\"black\")\n\trootP.title('あそぶ')\n\tsetumei=tk.Label(rootP,text='スライムが あらわれた!',font=(\"HGP創英角ポップ体\",\"50\",\"bold\"),bg='black',fg='red')\n\tsetumei2=tk.Label(rootP,text='クイズに せいかいすると スライムをたおせるのじゃ!',font=(\"HGP創英角ポップ体\",\"30\",\"bold\"),bg='black',fg='white')\n\tsetumei.grid(row=0,column=0,columnspan=2)\n\tsetumei2.grid(row=1,column=0,columnspan=2)\n\ts=tk.PhotoImage(file='s_1.png',master=rootP)\n\ts=s.subsample(4)\n\ts_L=tk.Label(rootP,image=s,relief='solid')\n\ts_L.grid(row=0,column=2,rowspan=2)\n\t\n\tqlists=tk.StringVar(value=qlist)\n\tql_m=tk.Listbox(rootP,font=(\"HGP創英角ポップ体\",\"30\",\"bold\"),listvariable=qlists,height=7)\n\tql_m.grid(row=2,column=1,pady=30,columnspan=2)\n\tscrollbar =ttk.Scrollbar(rootP,orient='vertical',command=ql_m.yview)\n\tql_m['yscrollcommand']=scrollbar.set\n\tscrollbar.grid(row=2,column=3,sticky=('N','S'),pady=30)\n\tql_l=tk.Label(rootP,text='みぎから\\nあそびたいクイズを\\nえらぶのじゃ',font=(\"HGP創英角ポップ体\",\"50\",\"bold\"),relief=\"solid\",fg='white',bg='black')\n\tql_l.grid(row=2,column=0,pady=40)\n\t\n\t\n\tbutton = tk.Button(rootP, text=\"クイズをはじめる!\",font=(\"HGP創英角ポップ体\",\"50\",\"bold\"),fg='red',bg='white', command=get_entry_text)\n\tbutton.grid(row=3, column=0, columnspan=3, padx=10, pady=5)\n\t\n\t\n\t\n\trootP.mainloop()\n\n","repo_name":"Abelia-flower/CTRL_Launcher2023SC","sub_path":"data/marubatu/src/PLAY.py","file_name":"PLAY.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
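PLAY.py drives its countdown and slime animation entirely with Tk's after() scheduler rather than sleep(), which keeps the UI responsive. The countdown idea in isolation (a minimal sketch, not the game's co() verbatim):

import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text='16', font=('Arial', 40))
label.pack()

def tick(seconds):
    label.config(text=str(seconds))
    if seconds > 0:
        root.after(1000, tick, seconds - 1)  # re-arm one second later

tick(16)
root.mainloop()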
+{"seq_id":"44839664417","text":"################################# FINAL SETTING! \n# model settings\nvoxel_size = [0.05, 0.05, 0.1]\npoint_cloud_range = [0, -40, -3, 70.4, 40, 1]\n\n# MOCO Model\nmodel = dict(\n # type='Inter_Intro_moco',\n type='Inter_Intro_moco_better',\n\n img_backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=-1,\n # norm_cfg=dict(type='BN'), # for debug\n norm_cfg=dict(type='SyncBN', eps=1e-3, momentum=0.01),\n norm_eval=False,\n style='pytorch'),\n\n # With MOCO\n pts_backbone=dict(\n type='PointNet2SAMSG',\n in_channels=4,\n num_points=(4096, 1024, (512, 512)),\n radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)),\n num_samples=((32, 32, 64), (32, 32, 64), (16, 16, 16)),\n sa_channels=(((32, 32, 64), (32, 32, 64), (64, 64, 128)),\n ((64, 64, 128), (64, 64, 128), (128, 128, 256)),\n ((128, 128, 256), (128, 128, 256), (256, 256, 512))),\n aggregation_channels=(128, 256, 1024),\n fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')),\n fps_sample_range_lists=((-1), (-1), (512, -1)),\n norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1),\n sa_cfg=dict(\n type='PointSAModuleMSG',\n pool_mod='max',\n # use_xyz=True,\n use_xyz=False,\n normalize_xyz=False)),\n\n # model training and testing settings\n train_cfg=dict(\n cl_strategy = dict(\n pts_intro_hidden_dim=1024,\n pts_intro_out_dim=128,\n img_inter_hidden_dim=2048,\n img_inter_out_dim=128,\n pts_inter_hidden_dim=1024,\n pts_inter_out_dim=128,\n pts_feat_dim=1024,\n img_feat_dim=2048,\n K=8192*4,\n m=0.999,\n T=0.07,\n points_center=[35.2, 0, -1],\n cross_factor=1,\n moco=False,\n simsiam=False,\n ############################################\n img_moco=False,\n point_intro=True, # intro-loss\n point_branch=True # if pts backbone\n )))\n\n# dataset settings\ndataset_type = 'KittiDataset'\ndata_root = 'data/kitti/'\nclass_names = ['Pedestrian', 'Cyclist', 'Car']\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\ninput_modality = dict(use_lidar=True, use_camera=True)\n# db_sampler = dict(\n# data_root=data_root,\n# info_path=data_root + 'kitti_dbinfos_train.pkl',\n# rate=1.0,\n# prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n# classes=class_names,\n# sample_groups=dict(Car=15))\n\nfile_client_args = dict(backend='disk')\n# Uncomment the following if use ceph or other file clients.\n# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient\n# for more details.\n# file_client_args = dict(\n# backend='petrel', path_mapping=dict(data='s3://kitti_data/'))\n\ntrain_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='LoadImageFromFile'),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), # filter range\n dict(type='IndoorPointSample', num_points=16384), # sample here only for pretrain!\n dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n\n ############################## \n dict(\n type='Resize',\n # img_scale=[(640, 192), (2560, 768)],\n img_scale=[(640, 192), (2400, 720)],\n multiscale_mode='range',\n keep_ratio=True),\n ##############################\n\n dict(\n type='GlobalRotScaleTrans',\n # rot_range=[-0.78539816, 0.78539816],\n # scale_ratio_range=[0.95, 1.05],\n rot_range=[-1.5707963, 1.5707963],\n scale_ratio_range=[0.75, 1.25],\n translation_std=[0, 0, 0],\n points_center=[35.2, 0, -1]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n # dict(type='ObjectRangeFilter', 
point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle3D', class_names=class_names),\n dict(\n type='Collect3D',\n keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d', 'points_ori']),\n]\n\ntest_pipeline = [] # No need to test\n\n# for dataset\npretraining=True\ncross=True # for cross pretrain\ndata = dict(\n samples_per_gpu=4,\n workers_per_gpu=4,\n # samples_per_gpu=3,\n # workers_per_gpu=3,\n train=dict(\n type='RepeatDataset',\n times=1,\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_train.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=train_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=False,\n pretraining=True,\n cross=True,\n # we use box_type_3d='LiDAR' in kitti and nuscenes dataset\n # and box_type_3d='Depth' in sunrgbd and scannet dataset.\n box_type_3d='LiDAR')),\n \n # actually there is no val\n val=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True,\n pretraining=True,\n box_type_3d='LiDAR'))\n\n# Not be used in pretrain\nevaluation = dict(start=9999, interval=1) # No use\n\n\n# optimizer\noptimizer = dict(\n constructor='HybridOptimizerConstructor',\n pts=dict(\n type='AdamW',\n # lr=0.002,\n lr=0.001,\n betas=(0.95, 0.99),\n weight_decay=0.01,\n step_interval=1),\n img=dict(\n type='SGD',\n # lr=0.03,\n lr=0.03,\n momentum=0.9,\n weight_decay=0.0001,\n step_interval=1),\n mlp=dict(\n type='SGD',\n # lr=0.03,\n lr=0.03,\n momentum=0.9,\n weight_decay=0.0001,\n step_interval=1))\n\n \n# optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\noptimizer_config = dict(grad_clip=None)\n# lr_config = dict(policy='CosineAnnealing', min_lr=0, warmup='linear', warmup_iters=10, warmup_ratio=0.001, warmup_by_epoch=True)\nlr_config = dict(policy='Exp', gamma=0.99)\n\n# runtime settings\ncheckpoint_config = dict(interval=5)\n\n# yapf:disable\nlog_config = dict(\n interval=30,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = None\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n\ntotal_epochs = 100\n\nrunner = dict(type='EpochBasedRunner', max_epochs=total_epochs)\n\nfind_unused_parameters=True # I cannot find it","repo_name":"zhyever/SimIPU","sub_path":"project_cl/configs/simipu/simipu_kitti.py","file_name":"simipu_kitti.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"27"}
+{"seq_id":"5003104714","text":"\"\"\"\n3. Napiši program koji upisuje bodove n plesnih natjecanja.\n Ispiši zbroj svih bodova tako da odbaciš najbolji i najlošiji rezultat.\n\"\"\"\n\nn = 5 # broj natjecanja\nlistaBodova = []\nfor i in range(1, n+1):\n bodovi = int(input(f\"Unesi bodove {i}. natjecanja: \"))\n listaBodova.append(bodovi)\nprint(\"Svi unešeni bodovi:\", listaBodova)\n# Ukloni min i max iz liste bodova\nlistaBodova.remove(min(listaBodova))\nlistaBodova.remove(max(listaBodova))\nprint(\"Lista bez min i max vrijednosti:\", listaBodova)\nprint(\"Suma bodova:\", sum(listaBodova))","repo_name":"mirjambe/pythonZadPredOOP","sub_path":"zad_3.py","file_name":"zad_3.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"13556043024","text":"#Python Program that prints all real solutions to the quadratic equation ax2+bx+c = 0. Read in a, b, c and use the quadratic formula. If the discriminate b2-4ac is negative, display a message stating that there are no real solutions.\n\nimport cmath\n\na = int(input())\nb = int(input())\nc = int(input())\n\nd = (b**2) - (4*a*c)\nd1 = cmath.sqrt(d)\ns1 = -b - (d1/(2*a))\ns2 = -b + (d1/(2*a))\n\nprint(s1)\nprint(s2)\n","repo_name":"Rajpreet16/basic_python_codes.","sub_path":"18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"9885554167","text":"import numpy as np\nimport os\nimport math\n\nclass Message:\n pathname=None\n content=None\n content_length = 0\n header = None\n header_length = 0\n file_name = None\n file_extension = None\n binary_digit_list=None\n\n def __init__(self, pathname = None):\n if pathname!= None:\n with open(pathname,'rb') as f:\n self.content=f.read()\n self.content_length = len(self.content)\n self.file_name, self.file_extension = os.path.splitext(pathname)\n self.file_name = self.file_name.split('/')[-1]\n\n def to_binary(self,msg=None):\n if msg is None:\n msg=self.content\n return [format(i,'08b') for i in msg]\n\n def num_to_binary_list(self,num):\n binary_list=[]\n binary=bin(num).replace(\"0b\",\"\")\n for digit in np.array(list(binary)):\n binary_list.append(digit.astype(int))\n while len(binary_list)<8:\n binary_list.insert(0,0)\n return binary_list\n\n def binary_to_dcible(self,binary_list):\n\n in_binary_str=[str(digit) for digit in binary_list]\n in_binary=''.join(in_binary_str)\n return int(in_binary,2)\n\n def to_binary_list(self,binary_msg=None):\n if binary_msg is None:\n binary_msg=self.to_binary()\n binary_list=[]\n temp = np.array([list(i) for i in binary_msg])\n for b_list in temp:\n for b_digit in b_list:\n binary_list.append(b_digit.astype(int))\n return binary_list\n \n def create_message_header(self):\n msg_header_string = \"\"\n temp = []\n msg_header_string += self.file_name + \";\"\n msg_header_string += self.file_extension + \";\"\n msg_header_string += str(self.content_length) + \";\"\n #print(msg_header_string)\n \n self.header = msg_header_string.encode('utf-8')\n\n header_binary_msg=self.to_binary(self.header)\n header_binary_list=self.to_binary_list(header_binary_msg)\n self.header_length = len(header_binary_list)\n\n return self.num_to_binary_list(self.header_length) + header_binary_list\n \n def create_message_content(self):\n return self.to_binary_list()\n\n def create_binary_list(self):\n self.binary_digit_list=self.create_message_header() +self.create_message_content()\n return self.binary_digit_list\n\n def extract_msg(self,binary_list):\n self.header_length=self.binary_to_dcible(binary_list[0:8])\n self.header=self.extract_header(binary_list[8:8+self.header_length],self.header_length)\n\n\n header_items=self.header.split(';')\n self.content_length=int(header_items[-2])\n self.file_name=header_items[0]\n self.file_extension=header_items[1]\n\n temp=binary_list[8+self.header_length:]\n self.content=self.extract_content(temp, self.content_length)\n\n self.write_msg(file_name='output',)\n return self.content\n\n def extract_header(self,header_binary_list,length_of_list):\n header_char_size=8\n header_str=\"\"\n for start in range(0,length_of_list,header_char_size):\n temp=header_binary_list[start:start+header_char_size]\n header_str+=chr(self.binary_to_dcible(temp))\n return header_str\n\n def extract_content(self,content_binary_list,length_of_content):\n content_char_size=8\n content_byte_array=bytearray()\n #print(content_binary_list)\n count=1\n start=0\n while count<=length_of_content:\n temp=content_binary_list[start:start+content_char_size]\n byte=self.binary_to_dcible(temp)\n content_byte_array.append(byte)\n start=start+content_char_size\n count+=1\n\n return content_byte_array\n\n def write_msg(self,file_name=None):\n if file_name == None:\n file_name = self.file_name\n file_name += self.file_extension\n\n with open(file_name, 'wb') as fout:\n fout.write(self.content)\n\n\n\n\n\n\ndef main():\n m=Message(\"Ape_Face.bmp\")\n 
#print(m.create_message_header()[8:])\n #print(m.create_message_content())\n #print(m.create_binary_list())\n lis=m.create_binary_list()\n lis=lis+[1,0,0,1,1,1,0,1,0,0,1,0,0,1]\n m.extract_msg(lis)\n # m.extract_msg(\n # [0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0])\n\nif __name__==\"__main__\":\n main()","repo_name":"pritam19798/LSB-Steganography","sub_path":"message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
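The header machinery in message.py ultimately rests on two conversions, byte value to 8-bit list and back. The code above routes them through numpy and string joins, but they reduce to:

def byte_to_bits(n):
    # 8-bit big-endian representation of one byte value
    return [int(b) for b in format(n, '08b')]

def bits_to_byte(bits):
    return int(''.join(str(b) for b in bits), 2)

assert bits_to_byte(byte_to_bits(77)) == 77
print(byte_to_bits(77))  # [0, 1, 0, 0, 1, 1, 0, 1]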
+{"seq_id":"602482609","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer,ENGLISH_STOP_WORDS\nfrom sklearn.metrics.pairwise import cosine_similarity\n#from streamlit.cli import main\nimport streamlit as st\n\ndata=pd.read_csv('/home/tilak/MCAD/IMDb movies.csv',low_memory = False)\n#movie = \"The Avengers\"\nmovie = st.text_input(\"Enter your movie\")\ndata = data[data['votes']>data['votes'].mean()]\n#print(data.info())\ndata['desp']=data['director'].astype(str) + \"\\n\" + data['writer'].astype(str) + \"\\n\" + data['production_company'].astype(str) + \"\\n\" + data['actors'].astype(str) + \"\\n\" + data['description'].astype(str)\ntfidf_vectorizer = TfidfVectorizer(stop_words=ENGLISH_STOP_WORDS,ngram_range=(1, 2), max_df=0.8, token_pattern=r'\\b[^\\d\\W][^\\d\\W]+\\b',min_df=10)\ntfidf_matrix = tfidf_vectorizer.fit_transform([x for x in data[\"desp\"]])\ncosine_similarity_df=cosine_similarity(tfidf_matrix)\ncosine_simialarity=pd.DataFrame(cosine_similarity_df,index=data.title,columns=data.title)\n#print(cosine_simialarity.head())\nChoice=cosine_simialarity.loc[:,movie]\nOrdered_similarities=Choice.sort_values(ascending=False)\nRecommendations=pd.DataFrame(Ordered_similarities)\nst.table(Recommendations.iloc[1:11].index)\n#print(\"The selected movie is \",movie)","repo_name":"Preethi-3105/Movie-Recommendation-System","sub_path":"reccomandation.py","file_name":"reccomandation.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"39780826124","text":"from numpy import int64\nfrom pandas.core.frame import DataFrame\nfrom utils import concat, sanitizeInt64, sanitizeStr\n\nclass DescripcionFuncion:\n \"\"\"\n Describe una funcion en el presupuesto, con el identificador del ejercicio presupuestario, la finalidad, la función y una descripción simple\n \"\"\"\n ejercicio_presupuestario: int\n finalidad_id: int64\n funcion_id: int64\n descripcion: str\n\n\ndef generarIdentificadorUniversal(DescFunc: DescripcionFuncion) -> str:\n \"\"\"\n Genera un string que contiene <>.<>.<>\n \"\"\"\n ep = DescFunc.ejercicio_presupuestario\n fin = DescFunc.finalidad_id\n fun = DescFunc.funcion_id\n return concat([str(ep), str(fin), str(fun)])\n\n\ndef generarCodigoFinalidadFuncion(DescFunc: DescripcionFuncion) -> str:\n \"\"\"\n Genera un string que contiene <>.<>\n \"\"\"\n fin = DescFunc.finalidad_id\n fun = DescFunc.funcion_id\n return concat([str(fin), str(fun)])\n\n\ndef extraerDescripcion(dataFrame: DataFrame) -> DescripcionFuncion:\n \"\"\"\n Extrae la información de ejercicio_presupuestario, finalidad_id, funcion_id\n y funcion_desc para crear un objecto DescripcionFuncion\n \"\"\"\n\n indexValues = dataFrame.index.values\n if (len(indexValues) > 1):\n raise ValueError(\"you passed a data frame with more than one row\")\n loc = indexValues[0]\n\n row = dataFrame.loc[loc]\n ep = row.at[\"ejercicio_presupuestario\"]\n fin_id = sanitizeInt64(row.at[\"finalidad_id\"])\n fun_id = sanitizeInt64(row.at[\"funcion_id\"])\n desc = sanitizeStr(row.at[\"funcion_desc\"])\n\n funcion = DescripcionFuncion()\n funcion.ejercicio_presupuestario = ep\n funcion.finalidad_id = fin_id\n funcion.funcion_id = fun_id\n funcion.descripcion = desc\n\n return funcion\n","repo_name":"nahuelrabey/presupuesto-educativo","sub_path":"DescripcionPorFuncion.py","file_name":"DescripcionPorFuncion.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"44031171379","text":"from __future__ import absolute_import , unicode_literals\n\nfrom celery import shared_task\nfrom celery.utils.log import get_task_logger\n\nimport gc\n\nimport pandas as pd\n\nfrom .models import Company , UploadLog\n\nfrom django.core.files.storage import FileSystemStorage\nfrom django.db.models import F\nfrom django.utils import timezone\n\n\n\nlogger = get_task_logger(__name__)\n\n\ndef clean_data(df):\n df['industry'] = df['industry'].fillna('not available').astype(str)\n df['locality'] = df['locality'].fillna('not available').astype(str)\n df['country'] = df['country'].fillna('not available').astype(str)\n df['linkedin url'] = df['linkedin url'].fillna('not available').astype(str)\n df['domain'] = df['domain'].fillna('not available').astype(str)\n df['name'] = df['name'].fillna('not available').astype(str)\n df['year founded'] = df['year founded'].fillna(0).astype(int)\n if df['domain'].dtype == object :\n df['size range'] = df['size range'].apply(lambda a: a.replace('+',''))\n df['size range'] = df['size range'].fillna('not available').astype(str)\n df['current employee estimate'] = df['current employee estimate'].fillna(0).astype(int)\n df['total employee estimate'] = df['total employee estimate'].fillna(0).astype(int)\n return df\n\n\n@shared_task\ndef process_file_upload(temp_folder,filename, username):\n fs = FileSystemStorage(location=temp_folder)\n\n row_count=0\n with open(fs.path(filename), 'r') as csvfile:\n row_count = sum(1 for row in csvfile)\n log_entry = UploadLog(user = username , file_name = filename , total_rows = row_count - 1 , process_rows = 0 ) \n log_entry.save()\n\n logger.info('Task Started For uploading data ============================')\n # Read the CSV file and insert the data into the database\n chunk_size = 100000 # Number of rows per chunk\n with open(fs.path(filename), 'r') as csvfile:\n for chunk in pd.read_csv(csvfile, chunksize=chunk_size):\n companies_to_insert = []\n logger.info('processing chunk===================')\n chunk = clean_data(chunk)\n for index, row in chunk.iterrows():\n company = Company(\n name=row['name'],\n domain=row['domain'],\n year_founded=row['year founded'],\n industry=row['industry'],\n size_range=row['size range'],\n locality=row['locality'],\n country=row['country'],\n linkedin_url=row['linkedin url'],\n current_employee_estimate=row['current employee estimate'],\n total_employee_estimate=row['total employee estimate']\n )\n companies_to_insert.append(company)\n\n # Insert the companies in bulk\n Company.objects.bulk_create(companies_to_insert)\n \n UploadLog.objects.filter(id=log_entry.id).update(process_rows=F('process_rows') + chunk.shape[0] )\n gc.collect()\n\n UploadLog.objects.filter(id=log_entry.id).update(end_time = timezone.now() , status = 'Complete')\n\n","repo_name":"sidrumade/catalyst-project","sub_path":"catalyst/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"27311600121","text":"\nimport pyodbc as p1\nimport os\nimport json\n\n##### connect to the SQL Server DB and insert data\ndef insertIntoSQL(allDicts, LoadIndicator):\n #preparing conection string for SQL Server connection\n conn = p1.connect(r'Driver={SQL Server}; Server=DESKTOP-R6USKL7\\MSSQL_DNST; Database=MortgageData; Trusted_Connection=yes;')\n cursor = conn.cursor()\n cursor.fast_executemany = True\n for dicts in allDicts:\n # preparing SQL Query for each type of files\n if LoadIndicator == 1:\n insertQuery = \"INSERT INTO dbo.LoanData (LoanID , Amount, FICO, DTI, HighBalFlag, PropOcc, PropState, PropType, Purpose) \" \\\n \"VALUES ( '\" + str(dicts[\"LoanID\"]) + \"' , \" \\\n + str(dicts[\"Amount\"]) + \" , \" \\\n + str(dicts[\"FICO\"]) + \", \" \\\n + str(dicts[\"DTI\"]) + \", '\" \\\n + str(dicts[\"HighBalFlag\"]) + \"', '\" \\\n + str(dicts[\"PropOcc\"]) + \"' , '\" \\\n + str(dicts[\"PropState\"]) + \"' , '\" \\\n + str(dicts[\"PropType\"]) + \"' , '\" \\\n + str(dicts[\"Purpose\"]) + \"' )\"\n elif LoadIndicator == 2:\n insertQuery = \"INSERT INTO dbo.PoolOptionData (Pool_Option_j, Pool_Type, Pool_Balance_Type, Agency, Servicer) \" \\\n \"VALUES ( '\" + str(dicts[\"Pool Option, j\"]) + \"' , '\" \\\n + str(dicts[\"Pool Type\"]) + \"' , '\" \\\n + str(dicts[\"Pool Balance Type\"]) + \"', '\" \\\n + str(dicts[\"Agency\"]) + \"', '\" \\\n + str(dicts[\"Servicer\"]) + \"' )\"\n elif LoadIndicator == 3:\n insertQuery = \"INSERT INTO dbo.EligiblePriceComb (LoanID, Price_P_ijk, Pool_Opton_j, Servicer_k) \" \\\n \"VALUES ( '\" + str(dicts[\"LoanID\"]) + \"' , \" \\\n + str(dicts[\"Price, P_ijk\"]) + \" , '\" \\\n + str(dicts[\"Pool Opton, j\"]) + \"', '\" \\\n + str(dicts[\"Servicer, k\"]) + \"' )\"\n elif LoadIndicator == 4:\n insertQuery = \"INSERT INTO dbo.BaseLine (LoanID, Price, Selected_Pool_ID, Servicer) \" \\\n \"VALUES ( '\" + str(dicts[\"Loan ID\"]) + \"' , \" \\\n + str(dicts[\"Price\"]) + \" , '\" \\\n + str(dicts[\"Selected Pool ID\"]) + \"', '\" \\\n + str(dicts[\"Servicer\"]) + \"' )\"\n elif LoadIndicator == 5:\n insertQuery = \"INSERT INTO dbo.OptLoanData (cID, AmortTerm, Amount, cCode , Channel, DTI, FICO, HighBalFlag, LockType, LTV, PropOcc, PropState , PropType, PropUnits, Purpose , RefiType ) \" \\\n \"VALUES ( '\" + str(dicts[\"cID\"]) + \"' , \" \\\n + str(dicts[\"AmortTerm\"]) + \" , \" \\\n + str(dicts[\"Amount\"]) + \", '\" \\\n + str(dicts[\"cCode\"]) + \"', '\" \\\n + str(dicts[\"Channel\"]) + \"', \" \\\n + str(dicts[\"DTI\"]) + \", \" \\\n + str(dicts[\"FICO\"]) + \", '\" \\\n + str(dicts[\"HighBalFlag\"]) + \"', '\" \\\n + str(dicts[\"LockType\"]) + \"', \" \\\n + str(dicts[\"LTV\"]) + \", '\" \\\n + str(dicts[\"PropOcc\"]) + \"', '\" \\\n + str(dicts[\"PropState\"]) + \"', '\" \\\n + str(dicts[\"PropType\"]) + \"', \" \\\n + str(dicts[\"PropUnits\"]) + \", '\" \\\n + str(dicts[\"Purpose\"]) + \"', '\" \\\n + str(dicts[\"RefiType\"]) + \"' )\"\n elif LoadIndicator == 6:\n insertQuery = \"INSERT INTO dbo.LoanResultData ( cId , [Rank], Price, ValMethodID, ValSpecID, Coupon, DerivedSettleOrdinal ) \" \\\n \"VALUES ( '\" + str(dicts[\"cId\"]) + \"' , \" \\\n + str(dicts[\"Rank\"]) + \" , \" \\\n + str(dicts[\"Price\"]) + \" , '\" \\\n + str(dicts[\"ValMethodID\"]) + \"', '\" \\\n + str(dicts[\"ValSpecID\"]) + \"', \" \\\n + str(dicts[\"Coupon\"]) + \", '\" \\\n + str(dicts[\"DerivedSettleOrdinal\"]) + \"' )\"\n cursor.execute(insertQuery)\n cursor.commit()\n cursor.close()\n conn.close()\n\n######################################### Main Function 
###############################################\ndef main(arg, LoadIndic):\n allDicts = []\n source_folder = arg\n\n #iterating through all the files in folder\n for file in os.listdir(source_folder):\n #concataneting foler and file name for each file\n full_filename = \"%s/%s\" % (source_folder, file)\n with open(full_filename, 'r') as fi:\n dict = json.load(fi)\n # collecting records of each JSON file into the list\n allDicts.append(dict)\n fi.close()\n #calling a function to insert all records of JSON file\n insertIntoSQL(allDicts, LoadIndic)\n\nif __name__ == \"__main__\":\n #set the name of folder which includes all JSON files of Loan data\n\n \"\"\"\n DataFolder = \"Pool Optimization Data for TC v5/Loan Data\"\n print('Data extraction step is started here ... ')\n main(DataFolder, 1)\n print('Successfully extracted data from Loan Data JSON file')\n\n DataFolder = \"Pool Optimization Data for TC v5/Pool Option Data\"\n main(DataFolder, 2)\n print('Successfully extracted data from Pool option Data JSON file')\n\n DataFolder = \"Pool Optimization Data for TC v5/Eligible Pricing Combinations\"\n main(DataFolder, 3)\n print('Successfully extracted data from Eligible Pricing Combinations Data JSON file')\n\n DataFolder = \"Pool Optimization Data for TC v5/Baseline (Constraints Set B)\"\n main(DataFolder, 4)\n print('Successfully extracted data from Baseline (Constraints Set B) Data JSON file')\n \n\n DataFolder = \"D:/Items/MIU/1. №-1 Career/Technical Assessment/Black Knight/Mortgage Pooling/Mortgage Pooling/Optimization Data 1/Loan Data\"\n print('Data extraction step is started here ... ')\n main(DataFolder, 5)\n print('Successfully extracted data from Loan Data JSON file')\n \n \"\"\"\n\n DataFolder = \"D:/Items/MIU/1. №-1 Career/Technical Assessment/Black Knight/Mortgage Pooling/Mortgage Pooling/Optimization Data 2/Loan Results Data\"\n print('Data extraction step is started here ... ')\n main(DataFolder, 6)\n print('Successfully extracted data from Loan Data JSON file')\n","repo_name":"DNSTM/Mortgage-Data-analysis","sub_path":"1. Data Extraction/LoanDataLoad.py","file_name":"LoanDataLoad.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"38474376512","text":"\"\"\"\n===== Initial Thoughts =====\nwe could do a string split but that takes up memory space...\nwhy don't we just count the number of spaces and add 1... only exception is \"\" which is 0\n\n~~Complexity Analysis\nTime - O(n)\nSpace - O(1)\n\nBefore I submit, I'm realizing we need to strip... can we do this without strip()?\n\nand actually, what about something like \"iasjdo adi asldf \" -> that should have 3, right?\n\nwhat if we count non-space chars limiting continuous segments by 1\nwe iterate through the string, assigning a counter to 1 if non-space. Once we hit a space, we add the 1\nat the end, we add whatever is on there\n\"\"\"\n\nclass Solution:\n def countSegments(self, s: str) -> int:\n count = 0\n current = 0\n for char in s:\n if char != \" \":\n current = 1\n else:\n count += current\n current = 0\n return count + current","repo_name":"jsphweid/chops","sub_path":"lc/answers/number-of-segments-in-a-string/2021.10.02-11.15.23.py","file_name":"2021.10.02-11.15.23.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"72097466316","text":"# description : https://leetcode.com/problems/palindrome-linked-list/description/ # NOQA\n# author : Hussam\n# solution : two pointers\n# o(n) time, o(1) space\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n if head is None:\n return True\n\n mid = self.get_mid_node(head)\n right = mid.next\n mid.next = None\n\n return self.compare(head, self.rotate(right))\n\n def get_mid_node(self, head):\n slow = head\n fast = head.next\n\n while fast is not None and fast.next is not None:\n fast = fast.next.next\n slow = slow.next\n\n return slow\n\n def rotate(self, head):\n prev = None\n while head is not None:\n temp = head.next\n head.next = prev\n prev = head\n head = temp\n\n return prev\n\n def compare(self, h1, h2):\n while h1 is not None and h2 is not None:\n if h1.val != h2.val:\n return False\n h1 = h1.next\n h2 = h2.next\n\n return True\n","repo_name":"hussamEL-Hwary/my-problem-solving","sub_path":"leetcode/two pointers/forwarding list/palindrome_linked_list.py","file_name":"palindrome_linked_list.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"7043639118","text":"import csv\nimport pprint\n\nimport os\ncpath = os.path.dirname(os.path.abspath(__file__))\n\ndef read_question_types():\n floc = cpath + '/piano_components/Question_Types.csv'\n question_types = []\n with open(floc,'r') as ft:\n rd = csv.DictReader(ft)\n for row in rd:\n question_types.append(row)\n\n return question_types\n\ndef read_filter_types():\n floc = cpath + '/piano_components/Filters.csv'\n filters = []\n with open(floc,'r') as ft:\n rd = csv.DictReader(ft)\n for row in rd:\n filters.append(row)\n\n return filters\n\ndef read_subject_types():\n floc = cpath + '/piano_components/Subject_List.csv'\n subs = []\n with open(floc,'r') as ft:\n rd = csv.DictReader(ft)\n for row in rd:\n subs.append(row)\n\n return subs","repo_name":"kdenny/phl-django-apps","sub_path":"maestro/read_files.py","file_name":"read_files.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"40925996880","text":"'''Faça um programa que receba três números inteiros e informe qual deles é o maior e qual deles é\r\no menor.'''\r\n\r\nn1 = int(input(\"Informe o Primeiro Numero \"))\r\n\r\nmaior = n1\r\nmenor = n1\r\n\r\nn2 = int(input(\"Informe o Segundo Numero \"))\r\n\r\nif n2 > maior:\r\n\tmaior = n2\r\n\r\nif n2 < menor:\r\n\tmenor = n2\r\n\t\r\nn3 = int(input(\"Informe o Terceiro Numero \"))\r\n\r\nif n3 > maior:\r\n\tmaior = n3\r\n\r\nif n3 < menor:\r\n\tmenor = n3\r\n\t\r\nprint(\"O Maior numero é: \", maior)\r\nprint(\"O Menor numero é: \", menor)\r\n","repo_name":"paulo-werle/Uffs","sub_path":"Algoritimos/Aula3/exercicio71.py","file_name":"exercicio71.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"7812029713","text":"import mysql.connector as connection\nimport pandas as pd\nfrom decouple import config\nfrom datetime import datetime\n\nclass Medmon:\n def __init__(self):\n self.host= config('DB_HOST')\n self.database =config('DB_NAME')\n self.user=config('DB_USERNAME')\n self.passwd=config('DB_PASSWORD')\n self.db_dataframe=\"\"\n \n def db_connect(\n self, \n tahun=datetime.now().year, \n bulan=datetime.now().month, \n tgl=datetime.now().day, \n limit=0\n ):\n\n limit_query = \"LIMIT {}\".format(limit)\n\n if limit != 0:\n str_query = \"\\\n SELECT * from data_news \\\n WHERE news_pubday='{}' \\\n {};\".format(datetime(tahun, bulan, tgl).strftime(\"%Y-%m-%d\"), limit_query)\n else:\n str_query = \"\\\n SELECT * from data_news \\\n WHERE news_pubday='{}';\".format(datetime(tahun, bulan, tgl).strftime(\"%Y-%m-%d\"))\n \n try:\n db_medmon = connection.connect(\n host=self.host,\n database=self.database,\n user=self.user,\n passwd=self.passwd,\n use_pure=True\n )\n self.db_dataframe = pd.read_sql(str_query, db_medmon)\n db_medmon.close()\n\n return self.db_dataframe\n except Exception as e:\n return str(e)\n\n# medmon = Medmon()\n# print(medmon.host)\n# medmon.db_connect()\n\n# print(medmon.db_dataframe)\n\n\n\n# df = result_dataFrame.copy()\n# news_title = df.news_title.values\n# print(news_title[4])\n","repo_name":"gugunm/flask-for-ner","sub_path":"medmon.py","file_name":"medmon.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"72119975436","text":"import math\nimport random\n\n#Задача №17\nprint(\"Задача №17 \\n Задание: Написать функцию решения квадратного уравнения. \\n Решение:\")\ndef solve_quadratic_equation(a, b, c):\n discriminant = b**2 - 4*a*c\n if discriminant > 0:\n x1 = (-b + math.sqrt(discriminant)) / (2 * a)\n x2 = (-b - math.sqrt(discriminant)) / (2 * a)\n return x1, x2\n elif discriminant == 0:\n x1 = (-b + math.sqrt(discriminant)) / (2*a)\n x2 = None\n return x1, x2\n else:\n x1 = None\n x2 = None\n return x1, x2\nprint(solve_quadratic_equation(5, 10, -15))\n#----------------------------------------------------\n#Задача №18\nprint(\"Задача №18 \\n Задание: Каждому символу в таблице символов Unicode соответствует число. Написать функцию, которая рассчитывает сумму чисел, которые соответствуют символам, стоящим между двумя заданными включительно. Например, в функцию передаются символы ‘x’ и ‘z’. Значит надо вычислить сумму кодов символов ‘x’,’y’,’z’. \\n Решение:\")\nfirst = 'x'\nlast = 'z'\n\ndef sum_symbol_codes(first, last):\n first_ord = ord(first)\n last_ord = ord(last)\n if first_ord <= last_ord:\n lower_bound = first_ord\n upper_bound = last_ord\n else:\n lower_bound = last_ord\n upper_bound = first_ord\n sum_symbol = 0\n for symbol in range(lower_bound, upper_bound + 1):\n sum_symbol += symbol\n return sum_symbol\n\n\nprint(\" Число:\" ,sum_symbol_codes(first, last))\n#----------------------------------------------------\n#Задача №19\nprint(\"Задача №19 \\n Задание: Написать функцию для поиска разницы между максимальным и минимальным числом среди num_limit случайно сгенерированных чисел в указанном числовом диапазоне. \\n Решение:\")\nprint(\" Поиск максимального значения:\")\ndef find_max_of_n(num_limit, lower_bound, upper_bound):\n curr_max = lower_bound\n for i in range(num_limit):\n\n rand_number = random.randint(lower_bound, upper_bound)\n print(' Рандомное число: ', rand_number)\n if rand_number > curr_max:\n curr_max = rand_number\n\n return curr_max\n\nresult = find_max_of_n(5, 100, 300)\nprint(\" Максимальное значение: %d\" % result)\nprint(\"---------------\")\nprint(\" Поиск минимального значения:\")\ndef find_min_of_n(num_limit, lower_bound, upper_bound):\n curr_min = upper_bound\n for i in range(num_limit):\n\n rand_number = random.randint(lower_bound, upper_bound)\n print(' Рандомное число: ', rand_number)\n if rand_number < curr_min:\n curr_min = rand_number\n\n return curr_min\n\nresult = find_min_of_n(5, 100, 500)\nprint(\" Минимальное значение: %d\" % result)\n#----------------------------------------------------\n#Задача №20\nprint(\"Задача №20 \\n Задание: Написать функцию для поиска разницы сумм всех четных и всех нечетных чисел среди 100 случайно сгенерированных чисел в произвольном числовом диапазоне. Т.е. от суммы четных чисел вычесть сумму нечетных чисел. 
\\n Решение:\")\ndef diff_even_odd(num_limit, lower_bound, upper_bound):\n even_sum = 0\n odd_sum = 0\n for i in range(num_limit):\n\n rand_number = random.randint(lower_bound, upper_bound)\n print(' Рандомное число: ', rand_number)\n if rand_number % 2 == 0:\n even_sum += rand_number\n else:\n odd_sum += rand_number\n\n print(\" Сумма четных чисел: %d\" % even_sum)\n print(\" Сумма нечетных чисел: %d\" % odd_sum)\n return even_sum - odd_sum\n\nprint(diff_even_odd(100, 10, 100))\n#----------------------------------------------------\n","repo_name":"AvengerDima/Python_Homeworks","sub_path":"Homework_5(17-20).py","file_name":"Homework_5(17-20).py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"32217135407","text":"from tkinter import *\nfrom paho.mqtt import client as mqtt_client\n\nimport random, json\n\nbroker \t\t= 'localhost'\nport \t\t= 1883\ntopic \t\t= 'proyek'\nclient_id \t= f'python-mqtt-{random.randint(0, 100)}'\n\ndef connect_mqtt() -> mqtt_client:\n def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to MQTT Broker!\")\n else:\n print(\"Failed to connect, return code %d\\n\", rc)\n\n client = mqtt_client.Client(client_id)\n client.on_connect = on_connect\n client.connect(broker, port)\n return client\n\nimport json\ndef subscribe(client: mqtt_client):\n def on_message(client, userdata, msg):\n _data = json.loads(msg.payload.decode())\n stats = str(_data[\"Parameters\"][\"status\"])\n \n if (stats == 0):\n square.create_rectangle(143, 89, 443, 239, fill = '#91F086', outline = 'black')\n else :\n square.create_rectangle(143, 89, 443, 239, fill = '#FF5252', outline = 'black')\n\n client.subscribe(topic)\n client.on_message = on_message\n\nwindow = Tk()\n\n# Title bar\nwindow.title('Smart Locker Dashboard')\nwindow.geometry('1366x768')\nwindow.resizable(False, True)\nwindow.configure(bg = 'white')\n\n# Banner\ncanvas = Canvas(window, width = 1366, height = 200)\ncanvas.place(x = 0, y = 0)\nimg = PhotoImage(file = 'banner.png')\ncanvas.create_image(0, 0, anchor = NW, image = img)\n\n# Square\nsquare = Canvas(window, width=1366, height=568)\nsquare.place(x = 0, y = 200)\n\n# Baris 1 ( x1, y1, x2, y2 )\nsquare.create_rectangle(143, 89, 443, 239, fill = '#91F086', outline = 'black')\nsquare.create_rectangle(533, 89, 833, 239, fill = '#91F086', outline = 'black')\nsquare.create_rectangle(923, 89, 1223, 239, fill = '#FF5252', outline = 'black')\n\n# Baris 2 ( x1, y1, x2, y2 )\nsquare.create_rectangle(143, 329, 443, 479, fill = '#91F086', outline = 'black')\nsquare.create_rectangle(533, 329, 833, 479, fill = '#FF5252', outline = 'black')\nsquare.create_rectangle(923, 329, 1223, 479, fill = '#91F086', outline = 'black')\n\nclient = connect_mqtt()\nsubscribe(client)\nclient.loop_start()\nwindow.mainloop()\nclient.loop_stop()","repo_name":"akumakumu/locker-dashboard","sub_path":"dashboard-locker.py","file_name":"dashboard-locker.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"23645237812","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.signal import convolve2d as conv2\nfrom skimage import color, data, restoration\nfrom sklearn.metrics import mean_squared_error\n\nrng = np.random.default_rng()\nastro = color.rgb2gray(data.astronaut())\n\npsf = np.ones((5, 5)) / 25\nastro = conv2(astro, psf, 'same')\n\nastro_noisy = astro.copy()\nastro_noisy += (rng.poisson(lam=25, size=astro.shape) - 10) / 255.\n\ndeconvolved_RL = restoration.richardson_lucy(astro_noisy, psf, num_iter=30)\n\nfig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 5))\nplt.gray()\n\nfor a in (ax[0], ax[1], ax[2]):\n a.axis('off')\n\nax[0].imshow(astro)\nax[0].set_title('Original Data')\n\nax[1].imshow(astro_noisy)\nax[1].set_title('Noisy data')\n\nax[2].imshow(deconvolved_RL, vmin=astro_noisy.min(), vmax=astro_noisy.max())\nax[2].set_title('Restoration using\\nRichardson-Lucy')\n\n\nprint(\"MSE(Noisy) = \", mean_squared_error(astro, astro_noisy))\nprint(\"MSE(RL) = \", mean_squared_error(astro, deconvolved_RL))\n\nfig.subplots_adjust(wspace=0.02, hspace=0.2,\n top=0.9, bottom=0.05, left=0, right=1)\n\nplt.savefig(\"image_comparison.png\")","repo_name":"Fegrant/tps-proc-de-imagenes","sub_path":"tp5/Ej3/blind_deconv.py","file_name":"blind_deconv.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"13342814887","text":"# Package imports\nimport pandas\nimport numpy\n\n# Local imports\nfrom calculate_framingham_risk_score import calculate_framingham_risk_score\n\n\ndef sample_from_key_to_unique(data, key):\n '''\n Requires:\n data - Pandas DataFrame\n key - Column of \"data\" to be sampled from\n\n Returns:\n result - Pandas DataFrame single entry per unique value of \"key\"\n from \"data\" sampled randomly\n '''\n # Distinct list of keys\n unique = data[key].unique()\n\n result = data.truncate(before=-1, after=-1).copy()\n # Sample one entry from each key\n for value in unique:\n result = pandas.concat([\n result,\n data.loc[data[key] == value].sample(1)\n ])\n\n return result\n\n\ndef process_synthea_patient_data(data_dir,\n data_save_dir,\n data_save_name):\n\n data = dict()\n paths = dict()\n datasets = [\n 'patients',\n 'observations',\n 'medications'\n ]\n\n # Import csv data\n for d in datasets:\n paths[d] = data_dir + d + '.csv'\n data[d] = pandas.read_csv(paths[d]).rename(str.lower, axis='columns')\n\n features = [\n 'Systolic Blood Pressure',\n 'Diastolic Blood Pressure',\n 'Tobacco smoking status NHIS',\n 'Body Mass Index',\n 'Glucose',\n 'Triglycerides',\n 'High Density Lipoprotein Cholesterol',\n 'Total Cholesterol',\n 'Low Density Lipoprotein Cholesterol'\n ]\n\n '''\n Process MEDICATIONS data\n '''\n # Gather examples of medication for 'Hypertension' or high blood pressure\n data['medications'].dropna(inplace=True)\n\n data['hypertension'] = (data['medications']\n .loc[data['medications']['reasondescription']\n .str.contains('Hypertension'),\n ['start', 'stop', 'patient']])\n\n del data['medications']\n\n data['hypertension'] = (data['hypertension']\n .assign(blood_pressure_med_treatment=True))\n\n '''\n Process PATIENTS data\n '''\n data['patients'] = (data['patients'][['id', 'birthdate', 'gender']]\n .rename({'id': 'patient', 'gender': 'sex'},\n axis='columns'))\n\n data['patients'].dropna(inplace=True)\n\n '''\n Process OBSERVATIONS data\n '''\n data['observations']['description'].dropna(inplace=True)\n\n new_feature = (data['observations']['description']\n .str.split(r'(\\-|\\[.*\\])', n=1, expand=True))\n\n data['observations'] = (data['observations']\n .assign(feature=new_feature[0]\n .str.strip()))\n\n data['obs features'] = (data['observations']\n .loc[data['observations']['feature']\n .isin(features),\n ['date', 'patient', 'encounter',\n 'feature', 'value']])\n\n del data['observations']\n\n data['obs features'].dropna(inplace=True)\n\n # Process numerical features\n data['obs floats'] = data['obs features'][data['obs features']['value']\n .str.contains(r'^[\\d\\.]+$')]\n\n data['obs floats'] = (data['obs floats']\n .assign(value=data['obs floats']['value']\n .astype(float)))\n\n # Process categorical features\n # Gather examples of smokers\n data['smokers'] = (data['obs features']\n .loc[data['obs features']['feature']\n .str.contains('Tobacco smoking status NHIS'),\n ['encounter', 'patient', 'value']])\n\n data['smokers'] = (data['smokers']\n .replace({'Former smoker': True,\n 'Never smoker': False,\n 'Current every day smoker': True})\n .rename({'value': 'smoker'}, axis='columns'))\n\n # Transpose such that features are columns with 'value as their values\n data['features'] = (data['obs floats']\n .pivot_table(index=['date', 'encounter', 'patient'],\n columns='feature', values='value')\n .reset_index()\n .dropna()\n .copy())\n\n del data['obs floats']\n\n # Merge 'patient', 'obserservations', 'smoker' and 'hypertension' data\n\n # Combine patient 
and observation data\n data['features'] = pandas.merge(data['features'], data['patients'],\n how='left', on='patient')\n\n # Combine with smoker status\n merged = pandas.merge(data['features'], data['smokers'],\n how='left', on=['encounter', 'patient']).copy()\n\n # Combine with hypertention medication status\n merged = pandas.merge(merged, data['hypertension'],\n how='left', on='patient')\n\n data['features'] = merged[\n (merged['start'].isnull() |\n ((merged['start'] <= merged['date']) &\n (merged['stop'] >= merged['date']))\n )]\n\n del merged, data['patients'], data['hypertension'], data['smokers']\n\n data['features'] = (\n data['features']\n .assign(blood_pressure_med_treatment=data['features'][\n 'blood_pressure_med_treatment']\n .fillna(False)))\n\n # Clean feature names\n new_feature_names = {\n 'Body Mass Index': 'bmi',\n 'Total Cholesterol': 'total_cholesterol',\n 'High Density Lipoprotein Cholesterol': 'hdl_cholesterol',\n 'Low Density Lipoprotein Cholesterol': 'ldl_cholesterol',\n 'Systolic Blood Pressure': 'systolic_blood_pressure',\n 'Diastolic Blood Pressure': 'diastolic_blood_pressure'\n }\n\n data['features'] = (data['features']\n .rename(new_feature_names, axis='columns')\n .rename(str.lower, axis='columns'))\n\n # Derived features\n # Age at time of observation\n data['features'] = data['features'].assign(age=(\n numpy.subtract(\n pandas.to_datetime(data['features'].date),\n pandas.to_datetime(data['features'].birthdate)))\n .dt.days / 365.25)\n\n # Framingham scoreA\n data['features'] = calculate_framingham_risk_score(data['features'],\n 'framingham')\n\n # Randomly sample from key 'patients' to create unique entry for each\n data['unique_features'] = sample_from_key_to_unique(\n data=data['features'],\n key='patient')\n\n # Pickle data to supplied location\n data['unique_features'].to_pickle(data_save_dir + data_save_name)\n\n return data['unique_features']\n","repo_name":"mckayryan/Differential_privacy_MVG_mechanism","sub_path":"code/process_synthea_patient_data.py","file_name":"process_synthea_patient_data.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"}
+{"seq_id":"26192850735","text":"from django.shortcuts import render, redirect\nfrom django.core.mail import send_mail\nfrom .forms import ContactForm\n\ndef index(request):\n sent=False\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n subject = f\"Email from {cd['f_name']} address {cd['email']}\"\n message = f\"The Message {cd['message']}\"\n send_mail(\n subject,\n message,\n 'zbdtechpro@gmail.com',\n ['shohruz.zubaidov@gmail.com'],\n )\n send = True\n return redirect('index')\n else:\n form = ContactForm()\n\n return render(request, 'index.html', {'form': form, 'sent':sent})","repo_name":"Zubaidov/contact_form_in_django","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"37871695471","text":"import re\nimport requests\n\n\ndef check_is_mixer_bridge_exchange(address, is_eoa, chain_id):\n \"\"\"\n This function is used to parse the explorer and check if address is mixer, bridge, cex, dex\n :param address:\n :param is_eoa:\n :param chain_id:\n :return:\n \"\"\"\n if chain_id == 1:\n base_url = \"https://etherscan.io/address/\"\n elif chain_id == 137:\n base_url = \"https://polygonscan.com/address/\"\n elif chain_id == 10:\n base_url = \"https://optimistic.etherscan.io/address/\"\n elif chain_id == 56:\n base_url = \"https://bscscan.com/address/\"\n elif chain_id == 250:\n base_url = \"https://ftmscan.com/address/\"\n elif chain_id == 42161:\n base_url = \"https://arbiscan.io/address/\"\n else:\n base_url = \"https://etherscan.io/address/\"\n\n headers_etherscan = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:107.0) Gecko/20100101 Firefox/107.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',\n 'Accept-Language': 'en-GB,en;q=0.5',\n 'Referer': 'https://etherscan.io/txs',\n 'Alt-Used': 'etherscan.io',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'Sec-Fetch-Dest': 'document',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-User': '?1',\n }\n\n try:\n response = requests.get(f'{base_url}{address.lower()}', headers=headers_etherscan)\n re_exchange = re.compile(r\"exchange|Exchange\")\n number_of_word_exchange = len(re_exchange.findall(response.text))\n re_bridge = re.compile(r\"bridge|Bridge\")\n number_of_word_bridge = len(re_bridge.findall(response.text))\n re_dex = re.compile(r\"Decentralized Exchange|decentralized exchange|dex|DEX\")\n number_of_word_dex = len(re_dex.findall(response.text))\n\n if number_of_word_bridge > number_of_word_exchange and not is_eoa:\n return 'bridge'\n elif is_eoa:\n return 'exchange'\n elif not is_eoa and number_of_word_dex > 0:\n return 'dex'\n else:\n return 'mixer'\n\n except Exception as e:\n print(f\"Unable to check the type of the address ({address}): {e}\")\n return 'unknown'\n","repo_name":"venglov/Funding-Laundering-Detector","sub_path":"src/mixer_bridge_exchange.py","file_name":"mixer_bridge_exchange.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"28"}
+{"seq_id":"33758392350","text":"from pathlib import Path\n\nfrom .load_utils import ICLDataset, split_icl_dataset\n\n\nclass GuideLoader:\n def __init__(self, examples: dict, train_test_split=None):\n self.dataset = ICLDataset(examples)\n\n if train_test_split is not None:\n self.dataset = split_icl_dataset(\n self.dataset, test_size=train_test_split, seed=0\n )\n\n @classmethod\n def from_dict(cls, guide_dict):\n return cls(guide_dict)\n\n @classmethod\n def from_dataset(cls, dataset_path: str | Path):\n dataset = ICLDataset(dataset_path)\n return cls(dataset)\n","repo_name":"berkecanrizai/llambda","sub_path":"llambda/guiding/guiding.py","file_name":"guiding.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"36167203956","text":"import hashlib\nimport logging\nimport os\nimport yaml\n\nfrom randrctl.exception import InvalidProfileException, NoSuchProfileException\nfrom randrctl.model import Profile, Rule, Output, XrandrConnection\n\nlogger = logging.getLogger(__name__)\n\n\ndef hash(string: str):\n if string:\n return hashlib.md5(string.encode()).hexdigest()\n else:\n return None\n\n\nclass ProfileManager:\n def __init__(self, read_locations: list, write_location: str):\n self.read_locations = list(filter(lambda location: os.path.isdir(location), read_locations))\n self.write_location = write_location\n\n def read_all(self):\n profiles = []\n for profile_dir in self.read_locations:\n for entry in os.listdir(profile_dir):\n path = os.path.join(profile_dir, entry)\n if os.path.isfile(path):\n try:\n with open(path) as profile_file:\n profiles.append(self.read_file(profile_file))\n except InvalidProfileException as e:\n logger.warning(e)\n return profiles\n\n def read_one(self, profile_name: str):\n # TODO handle missing profile\n profile = None\n for profile_dir in self.read_locations:\n profile_path = os.path.join(profile_dir, profile_name)\n if not os.path.isfile(profile_path):\n continue\n with open(profile_path) as profile_file:\n profile = self.read_file(profile_file)\n break\n\n if profile:\n return profile\n else:\n raise NoSuchProfileException(profile_name, self.read_locations)\n\n def read_file(self, profile_file_descriptor):\n try:\n result = yaml.load(profile_file_descriptor, Loader=yaml.FullLoader)\n\n rules = result.get('match')\n priority = int(result.get('priority', 100))\n\n if rules:\n for k, v in rules.items():\n # backward compatibility for match.mode\n if v.get('mode'):\n logger.warning(\"%s\\n\\tmatch.mode is deprecated\"\n \"\\n\\tConsider changing to 'supports' or 'prefers'\", profile_file_descriptor.name)\n v['supports'] = v['mode']\n del v['mode']\n rules[k] = Rule(**v)\n\n primary = result.get('primary')\n outputs_raw = result['outputs']\n outputs = {}\n for name, mode_raw in outputs_raw.items():\n outputs[name] = Output(**mode_raw)\n\n name = os.path.basename(profile_file_descriptor.name)\n\n return Profile(name, outputs, rules, primary, priority)\n except (KeyError, ValueError):\n raise InvalidProfileException(profile_file_descriptor.name)\n\n def write(self, p: Profile, yaml_flow_style: bool=False):\n \"\"\"\n Write profile to file into configured profile directory.\n Profile name becomes the name of the file. If name contains illegal characters, only safe part is used.\n For example, if name is my_home_vga/../../passwd, then file will be written as passwd under profile dir\n \"\"\"\n os.makedirs(self.write_location, exist_ok=True)\n dict = p.to_dict()\n safename = os.path.basename(p.name)\n fullname = os.path.join(self.write_location, safename)\n if safename != p.name:\n logger.warning(\"Illegal name provided. 
Writing as %s\", fullname)\n with open(fullname, 'w+') as fp:\n yaml.dump(dict, fp, default_flow_style=yaml_flow_style)\n\n def print(self, p: Profile, yaml_flow_style: bool=False):\n print(yaml.dump(p.to_dict(), default_flow_style=yaml_flow_style))\n\n def profile_from_xrandr(self, xrandr_connections: list, profile_name: str='profile'):\n outputs = {}\n rules = {}\n primary = None\n for connection in xrandr_connections:\n output_name = connection.name\n display = connection.display\n if not display or not connection.is_active():\n continue\n output = Output.fromconnection(connection)\n if connection.primary:\n primary = output_name\n outputs[output_name] = output\n rule = Rule(hash(display.edid), display.preferred_mode, display.mode)\n rules[output_name] = rule\n\n logger.debug(\"Extracted %d outputs from %d xrandr connections\", len(outputs), len(xrandr_connections))\n\n return Profile(profile_name, outputs, rules, primary)\n\n\nclass ProfileMatcher:\n \"\"\"\n Matches profile to xrandr connections\n \"\"\"\n def match(self, available_profiles: list, xrandr_outputs: list):\n \"\"\"\n return a sorted list of matched profiles\n \"\"\"\n output_names = set(map(lambda o: o.name, xrandr_outputs))\n\n # remove those with disconnected outputs\n with_rules = filter(lambda p: p.match and len(p.match) > 0, available_profiles)\n with_rules_covering_outputs = filter(lambda p: len(set(p.match) - output_names) == 0, with_rules)\n profiles = list(with_rules_covering_outputs)\n\n logger.debug(\"%d/%d profiles match outputs sets\", len(profiles), len(available_profiles))\n\n matching = []\n for p in profiles:\n score = self._calculate_profile_score(p, xrandr_outputs)\n if score >= 0:\n matching.append((score, p))\n return sorted(matching, key=lambda x: (x[0], x[1].priority), reverse=True)\n\n def find_best(self, available_profiles: list, xrandr_outputs: list):\n \"\"\"\n Find first matching profile across availableProfiles for actualConnections\n \"\"\"\n matching = self.match(available_profiles, xrandr_outputs)\n\n if not matching:\n return None\n\n max_score, p = matching[0]\n logger.debug(\"Found %d profiles with maximum score %d\", len(matching), max_score)\n logger.debug(\"Selected profile %s with score %d and priority %d\", p.name, max_score, p.priority)\n return p\n\n def _calculate_profile_score(self, p: Profile, xrandr_outputs: list):\n \"\"\"\n Calculate how profile matches passed specific outputs.\n Return numeric score\n \"\"\"\n score = 0\n logger.debug(\"Trying profile %s\", p.name)\n for o in xrandr_outputs:\n rule = p.match.get(o.name)\n s = self._score_rule(rule, o) if rule is not None else 0\n logger.debug(\"%s scored %d for output %s\", p.name, s, o.name)\n if s >= 0:\n score += s\n else:\n logger.debug(\"%s doesn't match %s\", p.name, o.name)\n score = -1\n break\n logger.debug(\"%s total score: %d\", p.name, score)\n return score\n\n def _score_rule(self, rule: Rule, xrandr_output: XrandrConnection):\n \"\"\"\n Starting rule score is 0 (a rule without any additional criteria for a connection still triggers auto-matching).\n Criteria, if defined, are checked and resulting rule score increases with every matched criterion.\n If any of the defined criteria fails to match, -1 is immediately returned.\n \"\"\"\n score = 0\n if rule.edid:\n if rule.edid == hash(xrandr_output.display.edid):\n score += 3\n else:\n return -1\n\n if rule.prefers:\n if xrandr_output.display.preferred_mode == rule.prefers:\n score += 2\n else:\n return -1\n\n if rule.supports:\n if 
xrandr_output.display.supported_modes.count(rule.supports) > 0:\n score += 1\n else:\n return -1\n return score\n","repo_name":"koiuo/randrctl","sub_path":"randrctl/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"28"}
+{"seq_id":"1922349950","text":"import math\r\nimport numpy as np\r\nimport json\r\nimport matplotlib.pylab as plt\r\nimport os.path\r\n\r\ndata = {}\r\ndata['data'] = []\r\n\r\n\r\nx = np.arange(-100,100,0.01)\r\nA = 1.25313\r\ny = []\r\ndef asd(i):\r\n b = math.sin((i * i - A*A))\r\n c = math.cos(b) ** 2\r\n n = 0.001 * (i * i + A*A)\r\n function = 0.5 + (c - 0.5) / (1 + n)\r\n y.append(function)\r\n return function\r\n\r\n[data['data'].append({'x': '{:.2f}'.format(i),'y': '{:.2f}'.format(asd(i))}) for i in x]\r\n\r\nplt.grid()\r\nplt.plot(x,y)\r\nplt.show()\r\nfilename = os.path.join('results')\r\nif not os.path.exists(filename) == True:\r\n os.mkdir(filename)\r\nfilename = os.path.join(filename, 'results.json')\r\n\r\nwith open(filename, 'w') as outfile:\r\n json.dump(data, outfile)\r\n\r\n\r\n","repo_name":"Slava9820/results","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"39708795998","text":"# -*- encoding: utf-8 -*-\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import View\nfrom cirujanos.apps.web.models import (\n Pathology,\n PathologyArticle,\n PathologyVideo,\n Procedure,\n ProcedureArticle,\n ProcedureVideo)\nfrom cirujanos.views import AppDetailView\n\nclass PathologyIndexView(View):\n def get(self, request):\n pathology = Pathology.menu_objects.first()\n url = reverse('pathology_detail', kwargs={'slug': pathology.slug})\n return HttpResponseRedirect(url)\n\n\nclass PathologyDetailView(AppDetailView):\n model = Pathology\n template_name = 'web/pathology/index.html'\n\n def get_context_data(self, **kwargs):\n context = super(PathologyDetailView, self).get_context_data(**kwargs)\n articles = PathologyArticle.objects. \\\n filter(pathology=self.object).order_by('order')\n videos = PathologyVideo.objects. \\\n filter(pathology=self.object).order_by('order')\n context.update({\n 'pathology_detail': self.object,\n 'pathology_articles': articles,\n 'pathology_videos': videos,\n })\n return self.decorate_context(context)\n\n\nclass ProcedureIndexView(View):\n def get(self, request):\n procedure = Procedure.menu_objects.first()\n url = reverse('procedure_detail', kwargs={'slug': procedure.slug})\n return HttpResponseRedirect(url)\n\n\nclass ProcedureDetailView(AppDetailView):\n model = Procedure\n template_name = 'web/procedure/index.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProcedureDetailView, self).get_context_data(**kwargs)\n articles = ProcedureArticle.objects. \\\n filter(procedure=self.object).order_by('order')\n videos = ProcedureVideo.objects. \\\n filter(procedure=self.object).order_by('order')\n context.update({\n 'procedure_detail': self.object,\n 'procedure_articles': articles,\n 'procedure_videos': videos,\n })\n return self.decorate_context(context)\n","repo_name":"dsaenztagarro/cirujanos","sub_path":"cirujanos/apps/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"4072859684","text":"#import _init_paths\nimport numpy as np \nimport tensorflow as tf \nimport argparse\nimport time\nimport os\nimport pdb\n\nfrom lib.episode_generator import EpisodeGenerator\nfrom lib.networks import ProtoNet \n\ndef parse_args():\n parser = argparse.ArgumentParser(description='neural statistician protonet')\n parser.add_argument('--init', dest='initial_step', default=0, type=int) \n parser.add_argument('--maxe', dest='max_epoch', default=100, type=int)\n parser.add_argument('--qs', dest='qsize', default=15, type=int)\n parser.add_argument('--nw', dest='nway', default=5, type=int)\n parser.add_argument('--ks', dest='kshot', default=1, type=int)\n parser.add_argument('--sh', dest='show_epoch', default=1, type=int)\n parser.add_argument('--sv', dest='save_epoch', default=10, type=int)\n parser.add_argument('--pr', dest='pretrained', default=False, type=bool)\n parser.add_argument('--data', dest='dataset_dir', default='../data_npy/miniImagenet')\n parser.add_argument('--model', dest='model_dir', default='../models')\n parser.add_argument('--dset', dest='dataset_name', default='miniImagenet')\n parser.add_argument('--name', dest='model_name', default='protonet')\n parser.add_argument('--lr', dest='lr', default=1e-3, type=float)\n parser.add_argument('--train', dest='train', default=1, type=int)\n parser.add_argument('--vali', dest='val_iter', default=60, type=int)\n args = parser.parse_args()\n return args\n\ndef validate(test_net, test_gen):\n accs, losses = [], []\n np.random.seed(2)\n for _ in range(args.val_iter):\n sx, sy, qx, qy = test_gen.get_episode(5, args.kshot, args.qsize)\n fd = {\\\n test_net.inputs['sx']: sx,\n test_net.inputs['qx']: qx,\n test_net.inputs['qy']: qy}\n outputs = [test_net.outputs['acc'], test_net.outputs['loss']]\n acc, loss = sess.run(outputs, fd)\n accs.append(acc)\n losses.append(loss)\n print ('Validation - ACC: {:.3f} ({:.3f})'\n '| LOSS: {:.3f} '\\\n .format(np.mean(accs) * 100., \n np.std(accs) * 100. 
* 1.96 / np.sqrt(args.val_iter),\n np.mean(losses)))\n np.random.seed()\n\nif __name__=='__main__': \n args = parse_args() \n print ('='*50) \n print ('args::') \n for arg in vars(args):\n print ('%15s: %s'%(arg, getattr(args, arg)))\n print ('='*50) \n\n nway = args.nway\n kshot = args.kshot\n qsize = args.qsize \n test_kshot = args.kshot\n\n lr_ph = tf.placeholder(tf.float32) \n protonet = ProtoNet(args.model_name, nway, kshot, qsize, isTr=True)\n loss = protonet.outputs['loss']\n acc = protonet.outputs['acc']\n \n # only evaluates 5way - kshot\n test_net = ProtoNet(args.model_name, 5, test_kshot, qsize, isTr=False, reuse=True)\n\n opt = tf.train.AdamOptimizer(lr_ph) \n update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_op):\n train_op = opt.minimize(loss) \n saver = tf.train.Saver()\n \n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n if args.pretrained:\n loc = os.path.join(args.model_dir,\n args.model_name, \n args.dataset_name + '.ckpt')\n saver.restore(sess, loc)\n\n train_gen = EpisodeGenerator(args.dataset_dir, 'train')\n test_gen = EpisodeGenerator(args.dataset_dir, 'test')\n if args.train:\n max_iter = train_gen.dataset_size[args.dataset_name] * args.max_epoch \\\n // (nway * qsize)\n show_step = args.show_epoch * max_iter // args.max_epoch\n save_step = args.save_epoch * max_iter // args.max_epoch\n avger = np.zeros([4])\n for i in range(1, max_iter+1): \n stt = time.time()\n cur_epoch = i * (nway * qsize) // train_gen.dataset_size[args.dataset_name]\n lr = args.lr if i < 0.7 * max_iter else args.lr*.1\n sx, sy, qx, qy = train_gen.get_episode(nway, kshot, qsize)\n fd = {\\\n protonet.inputs['sx']: sx,\n protonet.inputs['qx']: qx,\n protonet.inputs['qy']: qy,\n lr_ph: lr}\n p1, p2, _ = sess.run([acc, loss, train_op], fd)\n avger += [p1, p2, 0, time.time() - stt] \n\n if i % show_step == 0 and i != 0: \n avger /= show_step\n print ('========= epoch : {:8d}/{} ========='\\\n .format(cur_epoch, args.max_epoch))\n print ('Training - ACC: {:.3f} '\n '| LOSS: {:.3f} '\n '| lr : {:.3f} '\n '| in {:.2f} secs '\\\n .format(avger[0], \n avger[1], lr, avger[3]*show_step))\n validate(test_net, test_gen)\n avger[:] = 0\n\n if i % save_step == 0 and i != 0: \n out_loc = os.path.join(args.model_dir, # models/\n args.model_name, # bline/\n args.dataset_name + '.ckpt') # cifar100.ckpt\n print ('saved at : {}'.format(out_loc))\n saver.save(sess, out_loc)\n else: # if test only\n validate(test_net, test_gen)\n","repo_name":"minseop-aitrics/FewshotLearning","sub_path":"ProtoNet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"22924075632","text":"from bs4 import BeautifulSoup\nimport os\ndirectory = \"./\"\nfor filename in os.listdir(directory):\n\tf = os.path.join(directory, filename)\n\tif os.path.isfile(f):\n\t\twith open(f, 'r') as file:\n\t\t\thtml_doc = file.read()\n\t\t\tsoup = BeautifulSoup(html_doc, 'html.parser')\n\t\t\tfor des_row in soup.find_all('tr', valign=\"top\"): \n\t\t\t\tif des_row.find('td', string=\"Overall design\"):\n\t\t\t\t\tdes = des_row.find('td', style=\"text-align: justify\")\n\t\t\t\t\tprint(des.text)\t\t\t\t\t\n","repo_name":"gustavoquincy/find_new_seq_in_scrnaseqdb","sub_path":"find_design.py","file_name":"find_design.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"37987010388","text":"import collections\n\ndef solution(n, costs):\n answer, cnt = 0, 1\n graph = collections.defaultdict(list)\n \n for s, e, c in costs:\n graph[s].append((e,c))\n graph[e].append((s,c))\n \n check = [False]*n\n check[0] = True\n while cnt < n:\n minV, u = float('inf'), -1\n for i in range(len(check)):\n if not check[i]:\n continue\n for e, c in sorted(graph[i], key=lambda x: x[1]):\n if check[e]:\n continue\n if c < minV:\n minV = c\n u = e\n check[u] = True\n answer += minV\n cnt += 1\n \n return answer","repo_name":"zeunny/APS","sub_path":"programmers/42861.py","file_name":"42861.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"9124763163","text":"from typing import List\n\nclass Solution:\n # 1 time O((m + n)log(m + n))\n # def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n # nums1.extend(nums2)\n # nums1.sort()\n # n = len(nums1)\n # if n % 2:\n # return nums1[n // 2]\n # else:\n # return (nums1[n // 2 - 1] + nums1[n // 2]) / 2\n\n # 2 merge order method: time O(m + n)\n\n # 3 time O(log(min(m, n)))\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n if len(nums1) > len(nums2):\n tmp = nums1\n nums1 = nums2\n nums2 = tmp\n\n m = len(nums1)\n n = len(nums2)\n # right position\n total_left = (m + n + 1) // 2\n\n # 在 nums1 的区间 [0, m] 里查找恰当的分割线\n # nums1[i - 1] <= nums2[j]\n left = 0\n right = m\n\n while left < right:\n i = (left + right + 1) // 2\n j = total_left - i\n if nums1[i - 1] > nums2[j]:\n right = i - 1\n else:\n left = i\n\n i = left\n j = total_left - i\n nums1LeftMax = float(\"-inf\") if i == 0 else nums1[i - 1]\n nums1RightMin = float(\"inf\") if i == m else nums1[i]\n nums2LeftMax = float(\"-inf\") if j == 0 else nums2[j - 1]\n nums2RightMin = float(\"inf\") if j == n else nums2[j]\n\n if ((m + n) % 2) == 1:\n return max(nums1LeftMax, nums2LeftMax)\n else:\n return (max(nums1LeftMax, nums2LeftMax) + min(nums1RightMin, nums2RightMin)) / 2\n\n# nums1 = [1,3]\n# nums2 = [2]\nnums1 = [1,2]\nnums2 = [3,4]\n# nums1 = [0,0]\n# nums2 = [0,0]\n# nums1 = []\n# nums2 = [1]\n# nums1 = [2]\n# nums2 = []\nprint(Solution().findMedianSortedArrays(nums1, nums2))\n","repo_name":"henryZe/code","sub_path":"leetcode/daily/4_findMedianSortedArrays.py","file_name":"4_findMedianSortedArrays.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"}
+{"seq_id":"38776452929","text":"import random\n\nimport auto_prefetch\n\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.utils.text import slugify\n\nfrom utils.telegram import report_to_admin\nfrom markdownx.models import MarkdownxField\n\nAFFILIATE_REGION_SCOPE_CHOICES = (\n (\"US\", \"USA\"),\n (\"DE\", \"Germany\"),\n (\"ES\", \"Spain\"),\n (\"IT\", \"Italy\"),\n (\"FR\", \"France\"),\n (\"UK\", \"United Kingdom\"),\n)\n\n\nDISCLOSURES = (\n (\"amazon\", \"As an Amazon Associate I earn from qualifying purchases.\"),\n (\"general\", \"We will earn some commisions from this links\"),\n)\n\n\nBOOK_LEVELS = (\n (1, \"Very Basic\"),\n (2, \"Basic\"),\n (3, \"Intermediate\"),\n (4, \"Intermediate-Advanced\"),\n (5, \"Advanced\"),\n (6, \"Very Advanced\"),\n)\n\n\nBOOK_TEST_TYPES = (\n (\"general\", \"General 🤓\"),\n (\"cambridge\", \"Cambridge 💂♂️\"),\n (\"ielts\", \"IELTS 🇬🇧\"),\n (\"toefl\", \"TOEFL 🗽\"),\n (\"celpip\", \"CELPIP 🏢\"),\n)\n\nBOOK_CATEGORIES = (\n (\"general\", \"General 📗\"),\n (\"text-book\", \"Text books 📚\"),\n (\"writing\", \"Writing 📝\"),\n (\"vocabulary\", \"Vocabulary 👨🏫\"),\n)\n\n\nclass Book(auto_prefetch.Model):\n name = models.CharField(max_length=128)\n description = MarkdownxField()\n image = models.ImageField(upload_to=\"books\", null=True)\n level = models.PositiveSmallIntegerField(default=3, choices=BOOK_LEVELS)\n test_type = models.CharField(\n default=\"general\",\n max_length=16,\n choices=BOOK_TEST_TYPES,\n )\n category = models.CharField(\n default=\"general\",\n max_length=16,\n choices=BOOK_CATEGORIES,\n )\n slug = models.SlugField(max_length=128, blank=True, unique=True)\n featured = models.BooleanField(default=False)\n\n affiliate_link = models.URLField(null=True)\n affiliate_label = models.CharField(max_length=64, blank=True, null=True)\n affiliate_disclosure = models.CharField(\n max_length=16,\n choices=DISCLOSURES,\n default=\"amazon\",\n )\n promoted = models.BooleanField(default=False)\n\n def get_remote_image(self):\n from pathlib import Path\n import urllib.request\n from django.core.files import File\n\n if self.image_url and not self.image:\n tmpfilepath, _ = urllib.request.urlretrieve(self.image_url)\n path = Path(tmpfilepath)\n filename = path.name + \".\" + self.image_url.split(\".\")[-1]\n with path.open(mode=\"rb\") as f:\n self.image = File(f, name=filename)\n self.save()\n\n def get_detail_url(self):\n return reverse(\"book_detail\", kwargs={\"slug\": self.slug})\n\n def get_absolute_url(self):\n return self.get_detail_url()\n\n def get_list_url(self):\n return reverse(\"book_list\")\n\n def get_related_books(self, n=3):\n return self.__class__.objects.exclude(pk=self.pk).filter(\n level__in=[self.level - 1, self.level, self.level + 1],\n test_type=self.test_type,\n category=self.category,\n )[:n]\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n def get_promotion_text(self):\n \"\"\"\n It generates text for promoting a blog post\n \"\"\"\n starting_list = [\n \"Check out this book if you are interested in learning in an organised way!\",\n \"Check out this book!\",\n \"We all know that studying with a book is one of the best ways to get more knowledge. 
We recommend this one to improve your English!\",\n ]\n text = \"\"\n text += f\"{random.choice(starting_list)}\\n\\n\"\n text += f\"📗 Title: {self.name}\\n\\n\"\n text += f\"📊 Level: {self.get_level_display()}\\n\\n\"\n text += f\"More here 👉 {settings.SITE_BASE_URL}{self.get_detail_url()}\\n\\n\"\n\n return text\n\n @classmethod\n def get_random_object_to_promote(cls):\n qs = cls.objects.filter(promoted=False)\n if not qs.exists():\n qs = cls.objects.all()\n report_to_admin(\"All books were promoted, please make more.\")\n return random.choice(list(qs))\n\n\nclass BookAffiliateLink(models.Model):\n book = models.ForeignKey(Book, on_delete=models.CASCADE)\n url = models.URLField()\n label = models.CharField(max_length=64)\n is_global = models.BooleanField(default=True)\n country_code = models.CharField(max_length=8)\n","repo_name":"ramiboutas/englishquiz","sub_path":"affiliates/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"28"}
+{"seq_id":"8253278478","text":"from heapq import *\nclass Solution:\n def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:\n if K >= len(points):\n return points\n res = []\n for x,y in points:\n dis = math.sqrt(x**2 + y**2)\n if len(res) == K:\n heappushpop(res,[-dis,x,y])\n else:\n heappush(res,[-dis,x,y])\n return [[x,y] for _,x,y in res]","repo_name":"rajatg64/May-LeetCode-Challenge","sub_path":"Day29 K Closest Points to Origin.py","file_name":"Day29 K Closest Points to Origin.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"18948936372","text":"import math \n\n\ndef combinations(n, k): #get a set of n elements, shift right by k spots in that\n\n if k==0 or n == k: #If on boarders or at the very top of pyramid\n return 1\n else:\n return(combinations(n-1,k-1) + combinations(n-1,k)) #Return recursively all the numbers to hopefully get the number we need?\n\n\ndef BernoulliNumbers(number):#If not 1 and odd, is 0.00. \n #Need to solve for B(n), which is part of the sum.... \n if number == 0:\n return 1\n else:\n counter = 0\n for x in range(0,number): #From here on it sums up bernoullinumbers from range 0,number\n counter += combinations(number, x)*(BernoulliNumbers(x)/(number-x+1))#cant be //. Also can't use number+1 for combinations else we coult wind up with too much output.\n \n return 1-counter \n\ndef printBernoulliNumbers():\n for x in range(11):\n getvalue = BernoulliNumbers(x)\n print(\"B(\"+str(x)+\") = \" + str(round(getvalue, 8)))\n \n","repo_name":"enderquestral/Reed-CSCI382","sub_path":"Week4HW/Bernoullinumbers.py","file_name":"Bernoullinumbers.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"31596259120","text":"\"\"\"\nTune LDA models with OpenTuner\n\"\"\"\nimport opentuner\nimport os\nimport sys\nimport json\nimport statistics\nfrom opentuner import ConfigurationManipulator\nfrom opentuner import IntegerParameter\nfrom opentuner import FloatParameter\nfrom opentuner import MeasurementInterface\nfrom opentuner import Result\nsys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))\nfrom src.resources.utils.utils import parse_args_tune_lda, getPath, getWorkers, EvaluationMetrics\nfrom src.LDA.RunLDAOpts import OptionLDARunner\nfrom src.LDA.LDAOptions import LDAOptions\n\nmallet_path = getPath('mallet')\nos.environ.update({\n 'MALLET_HOME': os.path.dirname(os.path.dirname(mallet_path))\n})\n\nroot = getPath('root')\n#TODO: FIXTHIS: allow save data by single section or by multisections\nf_corpus_name = getPath('CORPUS_NAME')\nf_data_dict = os.path.join(*[root, 'post-processed-data', f_corpus_name])\nldarunner = OptionLDARunner(root=root, f_data_dict=f_data_dict, mallet_path=mallet_path, workers=getWorkers())\n\nclass LDATuner(MeasurementInterface):\n def manipulator(self):\n \"\"\"\n Define the search space by creating a\n ConfigurationManipulator\n \"\"\"\n manipulator = ConfigurationManipulator()\n\n numTopics_min, numTopics_max = [int(x) for x in self.args.numTopicsRange.split('-')]\n alpha_min, alpha_max = [float(x) for x in self.args.alphaRange.split('-')]\n beta_min, beta_max = [float(x) for x in self.args.betaRange.split('-')]\n\n manipulator.add_parameter(\n IntegerParameter('numTopics', numTopics_min, numTopics_max))\n manipulator.add_parameter(\n FloatParameter('alpha', alpha_min, alpha_max)\n )\n manipulator.add_parameter(\n FloatParameter('beta', beta_min, beta_max)\n )\n return manipulator\n\n def getVal(self, res, metric_val):\n \"\"\"\n Get value for param tuning.\n This is reserve for handling different formatted (dict and list) result from different configurations\n - Normal tuning returns single dictionary that include metrics such as coherence, perplexity, etc.\n Then according to the provided keyword, select the specific metric for tuning\n - Another case is the aggregation of running from 10 runs, and we use the median value for tuning\n The reason is to avoid noise and randomness in LDA training (sampling could cause variance in modeling)\n The result is a list of dictionaries, we will extract the median value of selected metric\n\n Parameters\n ----------\n res\n metric\n\n Return val for tuning\n -------\n\n \"\"\"\n metric = EvaluationMetrics(metric_val)\n if isinstance(res, dict):\n # Desired to maximize all metrics except perplexity\n if metric == EvaluationMetrics.PERPLEXITY:\n res_cost = float(res[metric.name])\n else:\n res_cost = -1 * float(res[metric.name])\n elif isinstance(res, list):\n # Get the list of values from 10 parallel runs\n if metric == EvaluationMetrics.PERPLEXITY:\n res_list = [float(x[metric]) for x in res]\n else:\n res_list = [-1 * float(x[metric.name]) for x in res]\n res_cost = statistics.median(res_list)\n return res_cost\n\n\n def run(self, desired_result, input, limit):\n \"\"\"\n Compile and run a given configuration then\n return performance\n \"\"\"\n cfg = desired_result.configuration.data\n\n lda_type = self.args.LDAType\n args = self.args\n args.numTopics = cfg['numTopics']\n args.alpha = cfg['alpha']\n args.beta = cfg['beta']\n if lda_type in [x.name for x in LDAOptions]:\n res = ldarunner.run(LDAOptions[lda_type], args)\n else:\n raise TypeError('Type %s not defined.' 
% lda_type)\n res_cost = self.getVal(res=res, metric_val=args.metric)\n # Return time to minimize the result\n return Result(time=res_cost)\n\n def save_final_config(self, configuration):\n \"\"\"called at the end of tuning\"\"\"\n print(\"Optimal block size written to %s_result.json:\" % self.args.LDAType, configuration.data)\n # self.manipulator().save_to_file(configuration.data,\n # '%s_result.json' % self.args.LDAType)\n with open('%s_result.json' % self.args.LDAType, 'w') as f_out:\n f_out.write(json.dumps(configuration.data))\n\n if hasattr(self.args, 'metric'):\n metric_name = EvaluationMetrics(args.metric).name\n f_opt_hyper_param = os.path.join(os.path.dirname(__file__), 'OptimalParam_{}.json'.format(metric_name))\n else:\n f_opt_hyper_param = os.path.join(os.path.dirname(__file__), 'OptimalParam.json')\n # Update OptimalParam.json\n if os.path.isfile(f_opt_hyper_param):\n with open(f_opt_hyper_param, 'r+') as f_update:\n res = json.load(f_update)\n res[self.args.LDAType] = configuration.data\n # Rewrite json result\n # Seek function finds the beginning of the file\n f_update.seek(0)\n f_update.write(json.dumps(res, indent=4))\n # Use truncate for inplace replacement\n f_update.truncate()\n else:\n # Create a new json result file for that metric\n with open(f_opt_hyper_param, 'w') as f_update:\n res = {self.args.LDAType: configuration.data}\n f_update.write(json.dumps(res, indent=4))\n\n\nif __name__ == '__main__':\n args, _ = parse_args_tune_lda(parents=opentuner.argparsers())\n LDATuner.main(args)\n","repo_name":"KDYao/Multilingual_LDA_ReplicationPackage","sub_path":"src/HyperTuning/HyperParamOptimization.py","file_name":"HyperParamOptimization.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"27142009084","text":"import wx\n\nfrom traits.api import Bool, Instance, Str, Tuple, provides\n\nfrom pyface.wx.aui import aui as AUI\nfrom pyface.image_cache import ImageCache\nfrom pyface.action.action_manager import ActionManager\nfrom pyface.action.i_tool_bar_manager import IToolBarManager\nfrom pyface.ui_traits import Orientation\n\n\n@provides(IToolBarManager)\nclass ToolBarManager(ActionManager):\n \"\"\" A tool bar manager realizes itself in errr, a tool bar control. \"\"\"\n\n # 'ToolBarManager' interface -------------------------------------------\n\n # Is the tool bar enabled?\n enabled = Bool(True)\n\n # Is the tool bar visible?\n visible = Bool(True)\n\n # The size of tool images (width, height).\n image_size = Tuple((16, 16))\n\n # The toolbar name (used to distinguish multiple toolbars).\n name = Str(\"ToolBar\")\n\n # The orientation of the toolbar.\n orientation = Orientation(\"horizontal\")\n\n # Should we display the name of each tool bar tool under its image?\n show_tool_names = Bool(True)\n\n # Should we display the horizontal divider?\n show_divider = Bool(False)\n\n # Private interface ----------------------------------------------------\n\n # Cache of tool images (scaled to the appropriate size).\n _image_cache = Instance(ImageCache)\n\n # ------------------------------------------------------------------------\n # 'object' interface.\n # ------------------------------------------------------------------------\n\n def __init__(self, *args, **traits):\n \"\"\" Creates a new tool bar manager. \"\"\"\n\n # Base class contructor.\n super().__init__(*args, **traits)\n\n # An image cache to make sure that we only load each image used in the\n # tool bar exactly once.\n self._image_cache = ImageCache(self.image_size[0], self.image_size[1])\n\n return\n\n # ------------------------------------------------------------------------\n # 'ToolBarManager' interface.\n # ------------------------------------------------------------------------\n\n def create_tool_bar(self, parent, controller=None, aui=False):\n \"\"\" Creates a tool bar. 
\"\"\"\n\n # If a controller is required it can either be set as a trait on the\n # tool bar manager (the trait is part of the 'ActionManager' API), or\n # passed in here (if one is passed in here it takes precedence over the\n # trait).\n if controller is None:\n controller = self.controller\n\n # Determine the wx style for the tool bar based on any optional\n # settings.\n style = wx.NO_BORDER | wx.CLIP_CHILDREN\n if aui:\n aui_style = AUI.AUI_TB_PLAIN_BACKGROUND\n if self.show_tool_names:\n aui_style |= AUI.AUI_TB_TEXT\n if self.orientation != \"horizontal\":\n aui_style |= AUI.AUI_TB_VERTICAL\n if not self.show_divider:\n style |= wx.TB_NODIVIDER\n tool_bar = _AuiToolBar(\n self, parent, -1, style=style, agwStyle=aui_style\n )\n else:\n style |= wx.TB_FLAT\n if self.show_tool_names:\n style |= wx.TB_TEXT\n if self.orientation == \"horizontal\":\n style |= wx.TB_HORIZONTAL\n else:\n style |= wx.TB_VERTICAL\n if not self.show_divider:\n style |= wx.TB_NODIVIDER\n tool_bar = _ToolBar(self, parent, -1, style=style)\n\n # fixme: Setting the tool bitmap size seems to be the only way to\n # change the height of the toolbar in wx.\n tool_bar.SetToolBitmapSize(self.image_size)\n\n # Add all of items in the manager's groups to the tool bar.\n self._wx_add_tools(parent, tool_bar, controller)\n\n # Make the tools appear in the tool bar (without this you will see\n # nothing!).\n tool_bar.Realize()\n\n # fixme: Without the following hack, only the first item in a radio\n # group can be selected when the tool bar is first realised 8^()\n self._wx_set_initial_tool_state(tool_bar)\n\n return tool_bar\n\n # ------------------------------------------------------------------------\n # Private interface.\n # ------------------------------------------------------------------------\n\n def _wx_add_tools(self, parent, tool_bar, controller):\n \"\"\" Adds tools for all items in the list of groups. \"\"\"\n\n previous_non_empty_group = None\n for group in self.groups:\n if len(group.items) > 0:\n # Is a separator required?\n if previous_non_empty_group is not None and group.separator:\n tool_bar.AddSeparator()\n\n previous_non_empty_group = group\n\n # Create a tool bar tool for each item in the group.\n for item in group.items:\n item.add_to_toolbar(\n parent,\n tool_bar,\n self._image_cache,\n controller,\n self.show_tool_names,\n )\n\n def _wx_set_initial_tool_state(self, tool_bar):\n \"\"\" Workaround for the wxPython tool bar bug.\n\n Without this, only the first item in a radio group can be selected\n when the tool bar is first realised 8^()\n\n \"\"\"\n\n for group in self.groups:\n checked = False\n for item in group.items:\n # If the group is a radio group, set the initial checked state\n # of every tool in it.\n if item.action.style == \"radio\":\n if item.control_id is not None:\n # Only set checked state if control has been created.\n # Using extra_actions of tasks, it appears that this\n # may be called multiple times.\n tool_bar.ToggleTool(\n item.control_id, item.action.checked\n )\n checked = checked or item.action.checked\n\n # Every item in a radio group MUST be 'radio' style, so we\n # can just skip to the next group.\n else:\n break\n\n # We get here if the group is a radio group.\n else:\n # If none of the actions in the group is specified as 'checked'\n # we will check the first one.\n if not checked and len(group.items) > 0:\n group.items[0].action.checked = True\n\n\nclass _ToolBar(wx.ToolBar):\n \"\"\" The toolkit-specific tool bar implementation. 
\"\"\"\n\n # ------------------------------------------------------------------------\n # 'object' interface.\n # ------------------------------------------------------------------------\n\n def __init__(self, tool_bar_manager, parent, id, style):\n \"\"\" Constructor. \"\"\"\n\n wx.ToolBar.__init__(self, parent, -1, style=style)\n\n # Listen for changes to the tool bar manager's enablement and\n # visibility.\n self.tool_bar_manager = tool_bar_manager\n\n self.tool_bar_manager.observe(\n self._on_tool_bar_manager_enabled_changed, \"enabled\"\n )\n\n self.tool_bar_manager.observe(\n self._on_tool_bar_manager_visible_changed, \"visible\"\n )\n\n return\n\n # ------------------------------------------------------------------------\n # Trait change handlers.\n # ------------------------------------------------------------------------\n\n def _on_tool_bar_manager_enabled_changed(self, event):\n \"\"\" Dynamic trait change handler. \"\"\"\n\n event.object.window._wx_enable_tool_bar(self, event.new)\n\n def _on_tool_bar_manager_visible_changed(self, event):\n \"\"\" Dynamic trait change handler. \"\"\"\n\n event.object.window._wx_show_tool_bar(self, event.new)\n\n\nclass _AuiToolBar(AUI.AuiToolBar):\n \"\"\" The toolkit-specific tool bar implementation for AUI windows. \"\"\"\n\n # ------------------------------------------------------------------------\n # 'object' interface.\n # ------------------------------------------------------------------------\n\n def __init__(self, tool_bar_manager, parent, id, style, agwStyle):\n \"\"\" Constructor. \"\"\"\n\n super().__init__(parent, -1, style=style, agwStyle=agwStyle)\n\n # Listen for changes to the tool bar manager's enablement and\n # visibility.\n self.tool_bar_manager = tool_bar_manager\n\n self.tool_bar_manager.observe(\n self._on_tool_bar_manager_enabled_changed, \"enabled\"\n )\n\n self.tool_bar_manager.observe(\n self._on_tool_bar_manager_visible_changed, \"visible\"\n )\n\n # we need to defer hiding tools until first time Realize is called so\n # we can get the correct order of the toolbar for reinsertion at the\n # correct position\n self.initially_hidden_tool_ids = []\n\n # map of tool ids to a tuple: position in full toolbar and the\n # ToolBarTool itself. 
Can't keep a weak reference here because once\n # removed from the toolbar the item would be garbage collected.\n self.tool_map = {}\n\n def Realize(self):\n if len(self.tool_map) == 0:\n for pos in range(self.GetToolsCount()):\n tool = self.GetToolByPos(pos)\n self.tool_map[tool.GetId()] = (pos, tool)\n AUI.AuiToolBar.Realize(self)\n if len(self.initially_hidden_tool_ids) > 0:\n for tool_id in self.initially_hidden_tool_ids:\n self.RemoveTool(tool_id)\n self.initially_hidden_tool_ids = []\n self.ShowTool = self.ShowToolPostRealize\n\n def ShowTool(self, tool_id, state):\n \"\"\"Used before realization to flag which need to be initially hidden\n \"\"\"\n if not state:\n self.initially_hidden_tool_ids.append(tool_id)\n\n def ShowToolPostRealize(self, tool_id, state):\n \"\"\"Normal ShowTool method, activated after first call to Realize\n \"\"\"\n tool = self.FindById(tool_id)\n if state and tool is None:\n self.InsertToolInOrder(tool_id)\n self.EnableTool(tool_id, True)\n self.Realize()\n # Update the toolbar in the AUI manager to force toolbar resize\n try:\n wx.CallAfter(\n self.tool_bar_manager.controller.task.window._aui_manager.Update\n )\n except:\n pass\n elif not state and tool is not None:\n self.RemoveTool(tool_id)\n # Update the toolbar in the AUI manager to force toolbar resize\n try:\n wx.CallAfter(\n self.tool_bar_manager.controller.task.window._aui_manager.Update\n )\n except:\n pass\n\n def InsertToolInOrder(self, tool_id):\n orig_pos, tool = self.tool_map[tool_id]\n pos = -1\n for pos in range(self.GetToolsCount()):\n existing_orig_pos, _ = self.tool_map[tool_id]\n if existing_orig_pos > orig_pos:\n break\n self.InsertToolItem(pos + 1, tool)\n\n ## Additional convenience functions for the normal AGW AUI toolbar\n\n def AddLabelTool(\n self,\n id,\n label,\n bitmap,\n bmpDisabled,\n kind,\n shortHelp,\n longHelp,\n clientData,\n ):\n \"The full AddTool() function.\"\n return self.AddTool(\n id,\n label,\n bitmap,\n bmpDisabled,\n kind,\n shortHelp,\n longHelp,\n clientData,\n None,\n )\n\n def InsertToolItem(self, pos, tool):\n self._items[pos:pos] = [tool]\n return tool\n\n def DeleteTool(self, tool_id):\n \"\"\"\n Removes the specified tool from the toolbar and deletes it.\n\n :param integer `tool_id`: the :class:`AuiToolBarItem` identifier.\n :returns: ``True`` if the tool was deleted, ``False`` otherwise.\n :note: Note that it is unnecessary to call :meth:`Realize` for the\n change to take place, it will happen immediately.\n \"\"\"\n\n tool = self.RemoveTool(tool_id)\n if tool is not None:\n tool.Destroy()\n return True\n\n return False\n\n def RemoveTool(self, tool_id):\n \"\"\"\n Removes the specified tool from the toolbar but doesn't delete it.\n\n :param integer `tool_id`: the :class:`AuiToolBarItem` identifier.\n :returns: ``True`` if the tool was deleted, ``False`` otherwise.\n :note: Note that it is unnecessary to call :meth:`Realize` for the\n change to take place, it will happen immediately.\n \"\"\"\n\n idx = self.GetToolIndex(tool_id)\n\n if idx >= 0 and idx < len(self._items):\n self._items.pop(idx)\n self.Realize()\n return True\n\n return False\n\n FindById = AUI.AuiToolBar.FindTool\n\n GetToolState = AUI.AuiToolBar.GetToolToggled\n\n GetToolsCount = AUI.AuiToolBar.GetToolCount\n\n def GetToolByPos(self, pos):\n return self._items[pos]\n\n def OnSize(self, event):\n # Quickly short-circuit if the toolbar isn't realized\n if not hasattr(self, \"_absolute_min_size\"):\n return\n\n AUI.AuiToolBar.OnSize(self, event)\n\n # 
------------------------------------------------------------------------\n # Trait change handlers.\n # ------------------------------------------------------------------------\n\n def _on_tool_bar_manager_enabled_changed(self, event):\n \"\"\" Dynamic trait change handler. \"\"\"\n\n try:\n event.object.controller.task.window._wx_enable_tool_bar(\n self, event.new\n )\n except:\n\n pass\n\n def _on_tool_bar_manager_visible_changed(self, event):\n \"\"\" Dynamic trait change handler. \"\"\"\n\n try:\n event.object.controller.task.window._wx_show_tool_bar(\n self, event.new\n )\n except:\n\n pass\n return\n","repo_name":"enthought/pyface","sub_path":"pyface/ui/wx/action/tool_bar_manager.py","file_name":"tool_bar_manager.py","file_ext":"py","file_size_in_byte":14133,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"28"}
+{"seq_id":"72977078156","text":"\"\"\"Company Info Views\"\"\"\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.contrib import messages\n\n\ndef about_us(request):\n \"\"\"View to return shipping and returns info page\"\"\"\n return render(request, 'company/about_us.html')\n\n\ndef privacy_policy(request):\n \"\"\"View to return shipping and returns info page\"\"\"\n return render(request, 'company/privacy_policy.html')\n\n\ndef faq_page(request):\n \"\"\"View to return FAQ's page\"\"\"\n return render(request, 'help/faqs.html')\n\n\ndef shipping_returns(request):\n \"\"\"View to return shipping and returns info page\"\"\"\n return render(request, 'help/shipping_returns.html')\n\n\ndef contact(request):\n if request.method == 'POST':\n first_name = request.POST.get('first_name')\n last_name = request.POST.get('last_name')\n subject = request.POST.get('subject')\n email_address = request.POST.get('email_address')\n message = request.POST.get('message')\n\n message_data = {\n 'first_name': first_name,\n 'last_name': last_name,\n 'email_address': email_address,\n 'subject': subject,\n 'message': message,\n }\n message = '''\n From: {}\n New message: {}\n '''.format(message_data['email_address'], message_data['message'])\n\n send_mail(\n message_data['subject'], message, '', ['ganiyatbadara@gmail.com'])\n\n messages.info(request, (\n f'Your message has been sent, we will contact you \\\n via { email_address } as soon as possible.'))\n return render(request, 'home/index.html')\n\n return render(request, 'contact_us/contact_us.html')\n","repo_name":"Oyindamolabadara/signature","sub_path":"company_info/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20297581149","text":"class VulnerabilityInfo(object):\n def __init__(self, name , v_type , cve_link , download_link):\n self.name = name\n self.type = v_type\n self.cve_link = cve_link\n self.download_link = download_link\n \n def dictionary(self):\n v_dict = dict()\n v_dict[\"name\"] = self.name\n v_dict[\"type\"] = self.type\n v_dict[\"cve_link\"] = self.cve_link\n v_dict[\"download_link\"] = self.download_link\n return v_dict","repo_name":"jedagda/project","sub_path":"src/model/VulnerabilityInfo.py","file_name":"VulnerabilityInfo.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"4451035939","text":"def hasCycle(self, head: Optional[ListNode]) -> bool:\n slow,fast=head,head\n while fast and fast.next:\n if fast.next.next==slow.next:\n return True\n else:\n slow=slow.next\n fast=fast.next.next\n return False\n \n","repo_name":"javaharreddy/strivers-sde-sheet-solutions","sub_path":"Linked List Cycle.py","file_name":"Linked List Cycle.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"71820003914","text":"\n\ndef SplitChip(s):\n chipName, ioPut = s.split(\"|\")\n ioPut = ioPut.split(\",\")\n return chipName, ioPut\n\n\ndef GetInput(chipName, ioPut, chipsData):\n inputData = []\n for i in chipsData[chipName][0]:\n for j in ioPut:\n if j.split(\"=\")[0] == i:\n inputData.append(j.split(\"=\")[1].split(\"[\")[0])\n return inputData\n\n\ndef GetOutput(chipName, ioPut, chipsData):\n outputData = []\n for i in chipsData[chipName][1]:\n for j in ioPut:\n if j.split(\"=\")[0] == i:\n outputData.append(j.split(\"=\")[1])\n return outputData\n\n\ndef GetInfoAboutChip(s, chipsData):\n chipName, ioPut = SplitChip(s)\n if chipName not in chipsData.keys():\n exit(chipName)\n inputData = GetInput(chipName, ioPut, chipsData)\n outputData = GetOutput(chipName, ioPut, chipsData)\n elapsedTime = chipsData[chipName][2]\n chipUsed = chipsData[chipName][3]\n return inputData, outputData, elapsedTime, chipUsed\n\n\ndef SplitSimpleFile(file, chipsData):\n result = [[\"true\", \"false\"], [], []] # 0 - IN, 1 - OUT, 2 - PARTS\n state = -1\n for s in file:\n if s in (\"IN:\", \"OUT:\", \"PARTS:\"):\n state += 1\n else:\n if state == 2:\n result[state].append(GetInfoAboutChip(s, chipsData))\n else:\n for substring in s.split(\",\"):\n if substring != \"\":\n result[state].append(substring)\n return result","repo_name":"Ferdea/CHIPS","sub_path":"processinfo.py","file_name":"processinfo.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"18947203929","text":"\n\nimport numpy as np \nimport pandas as pd \n\nfrom scipy import stats\nfrom statsmodels.stats.multitest import multipletests\n\nfrom typing import Dict, List\n\ndef calculateTTest(X : pd.DataFrame, columNamesGroup1 : List[str], columNamesGroup2 : List[str], multipleTestMethod : str = \"fdr_tsbky\"):\n \"\"\"\"\"\"\n X1, X2 = X.loc[:,columNamesGroup1], X.loc[:,columNamesGroup2]\n T,p = stats.ttest_ind(X1, X2, nan_policy=\"omit\", axis=1)\n\n boolIdx, p_adj, _, _ = multipletests(p, alpha=0.05, method=multipleTestMethod)\n tTestDifference = pd.DataFrame(pd.Series(X1.mean(axis=1) - X2.mean(axis=1), name=\"x\"))\n tTestDifference[\"y\"] = (-1)*np.log10(p)\n tTestDifference[\"s\"] = boolIdx\n return tTestDifference\n\n\ndef calculateOneWayANOVA(X : pd.DataFrame, groupings : Dict[str,Dict[str,list]], groupingName : str, anovaCutoff : float = 0.05):\n \"\"\"\n Calculate one way anova p values.\n \"\"\"\n if not groupingName in groupings:\n raise ValueError(f\"{groupingName} not found in groupings.\")\n #create empty result data frame \n oneWayANOVAColumnName = f\"p-1WANOVA({groupingName})\"\n results = pd.DataFrame(index = X.index, columns=[oneWayANOVAColumnName])\n #create list of values to be tested using anova\n testGroupData = [X[columnNames].values for columnNames in groupings[groupingName].values()]\n #returns F-value and p-values\n F,p = stats.f_oneway(*testGroupData,axis=1)\n #put values into result dataframe\n results.loc[X.index,oneWayANOVAColumnName] = p\n #annotate significant hits.\n significantBoolIdx = results.index[results[oneWayANOVAColumnName] < anovaCutoff]\n #create pandas Series with significant p-values\n selectionpvalues = [pd.Series(\n results.loc[X.index,oneWayANOVAColumnName].values, \n name=oneWayANOVAColumnName, index=X.index).loc[significantBoolIdx].reset_index()]\n pValueColumnName = [oneWayANOVAColumnName]\n\n return significantBoolIdx, selectionpvalues, pValueColumnName\n\n\n\nclass TwoWAyANOVA(object):\n\n def __init__(self,df, groupings,columnNames,*args,**kwargs):\n \"\"\n self.groupings = groupings\n self.groupingNames = list(groupings.keys())\n self.columnNames = columnNames\n self.df = df\n self.N = len(columnNames)\n\n def caulculate(self):\n\n df_a, df_b, df_axb, df_w = self.calculateDFs(self.groupingNames,self.groupings)\n grandMean = self.df[self.columnNames].mean(axis=1)\n ssq_a, ssq_b, ssq_axb, ssq_t, ssq_w = self.calculateSumOfSquares(self.groupings,self.groupingNames, grandMean)\n ms_a, ms_b, ms_axb, ms_w = self.calculateMS(ssq_a, ssq_b, ssq_axb, ssq_w, df_a,df_b,df_axb,df_w)\n F = self.calculateF(ms_a, ms_b, ms_axb, ms_w)\n p_a = pd.DataFrame([{\n \"p-value {}\".format(self.groupingNames[0]):stats.f.sf(fa,df_a,df_w),\n \"p-value {}\".format(self.groupingNames[1]):stats.f.sf(fb,df_b,df_w),\n \"p-value Interaction\":stats.f.sf(fab,df_axb,df_w)} for fa,fb,fab in F.values],\n index=self.df.index)\n return p_a\n\n def calculateDFs(self,groupingNames,groupings):\n \"\"\n \n df_a = len(groupings[groupingNames[0]]) - 1\n df_b = len(groupings[groupingNames[1]]) - 1\n df_axb = df_a*df_b \n df_w = self.N - (len(groupings[groupingNames[0]])*len(groupings[groupingNames[1]]))\n\n return df_a, df_b, df_axb, df_w\n\n\n def calculateSumOfSquares(self,groupings,groupingNames,grandMean):\n \"\"\n ssq_a_group = pd.DataFrame(dict([(groupItem,(self.df[groupItems].mean(axis=1)-grandMean).pow(2)) for _, groupItems in groupings[groupingNames[0]].items() for groupItem in groupItems]))\n ssq_b_group = 
pd.DataFrame(dict([(groupItem,(self.df[groupItems].mean(axis=1)-grandMean).pow(2)) for _, groupItems in groupings[groupingNames[1]].items() for groupItem in groupItems]))\n ssq_a = ssq_a_group.sum(axis=1)\n ssq_b = ssq_b_group.sum(axis=1)\n ssq_t = self.df[self.columnNames].subtract(grandMean,axis=0).pow(2,axis=1).sum(axis=1)\n withinMeans = []\n for groupName, groupItems in groupings[groupingNames[0]].items():\n\n groupData = self.df[groupItems]\n r = []\n colNames = []\n for groupName2, groupItems2 in groupings[groupingNames[1]].items():\n gis = [colName for colName in groupItems2 if colName in groupData.columns]\n r.extend([pd.Series(groupData[gis].mean(axis=1), name = gg) for gg in gis])\n colNames.extend(gis)\n\n withinMeans.append(pd.concat(r,axis=1))\n\n withinMeans = pd.concat(withinMeans,axis=1)\n rr = []\n for _, groupItems in groupings[groupingNames[0]].items():\n rr.append((self.df[groupItems].subtract(withinMeans[groupItems],axis=0)).pow(2))\n ssq_w = pd.concat(rr,axis=1).sum(axis=1)\n ssq_axb = ssq_t - ssq_a - ssq_b - ssq_w\n return ssq_a, ssq_b, ssq_axb, ssq_t, ssq_w\n\n def calculateMS(self,ssq_a,ssq_b,ssq_axb,ssq_w,df_a,df_b,df_axb,df_w):\n \"\"\n ms_a = ssq_a.divide(df_a)\n ms_b = ssq_b.divide(df_b)\n ms_axb = ssq_axb.divide(df_axb)\n ms_w = ssq_w.divide(df_w)\n\n return ms_a, ms_b, ms_axb, ms_w\n \n def calculateF(self,ms_a,ms_b,ms_axb,ms_w):\n \"\"\n f_a = ms_a.divide(ms_w)\n f_b = ms_b.divide(ms_w)\n f_axb = ms_axb.divide(ms_w)\n\n F = pd.concat([f_a,f_b,f_axb], axis=1)\n return F ","repo_name":"hnolCol/mitocube","sub_path":"backend/helper/StatsUtils.py","file_name":"StatsUtils.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"1638146071","text":"# Basic input validation\ndef validate_units(input_units):\n unit_lists = [\n [\"A\", \"a\", \"Ace\", \"ACE\", \"ace\"],\n [\"K\", \"k\", \"King\", \"KING\", \"king\"],\n [\"Q\", \"q\", \"Queen\", \"QUEEN\", \"queen\"],\n [\"J\", \"j\", \"Jack\", \"JACK\", \"jack\"],\n [\"10\", \"Ten\", \"TEN\", \"ten\"],\n [\"9\", \"Nine\", \"NINE\", \"nine\"],\n [\"8\", \"Eight\", \"EIGHT\", \"eight\"],\n [\"7\", \"Seven\", \"SEVEN\", \"seven\"],\n [\"6\", \"Six\", \"SIX\", \"six\"],\n [\"5\", \"Five\", \"FIVE\", \"five\"],\n [\"4\", \"Four\", \"FOUR\", \"four\"],\n [\"3\", \"Three\", \"THREE\", \"three\"],\n [\"2\", \"Two\", \"TWO\", \"two\"],\n ]\n for unit_list in unit_lists:\n if input_units in unit_list:\n return unit_list[0]\n print(\"I'm sorry that's not a valid response. \")\n return None\n\n\n# Library of card values to turn strings into blackjack value\ncard_values = {\n \"A\": [1], \"K\": [10], \"Q\": [10], \"J\": [10], \"10\": [10], \"9\": [9],\n \"8\": [8], \"7\": [7], \"6\": [6], \"5\": [5], \"4\": [4], \"3\": [3], \"2\": [2],\n}\n\n\nrun = True\n\n# Set card equal to user input, ensure it's valid, then return an int\nwhile run:\n player_first_card = input(\"What is your first card?\")\n player_first_card = validate_units(player_first_card)\n player_first_card = card_values[player_first_card][0]\n player_second_card = input(\"What is your second card?\")\n player_second_card = validate_units(player_second_card)\n player_second_card = card_values[player_second_card][0]\n player_third_card = input(\"what is your third card?\")\n player_third_card = validate_units(player_third_card)\n player_third_card = card_values[player_third_card][0]\n\n# Take the cards totals\n card_value = player_first_card + player_second_card + player_third_card\n\n# Fill the advice string with the appropriate move\n advice = \"\"\n\n if card_value < 17:\n advice = \"Hit!\"\n elif card_value < 21:\n advice = \"Stay, play it cool.\"\n elif card_value == 21:\n advice = \"Blackjack! You're amazing!\"\n elif card_value > 21:\n advice = \"It's too late for me, you've already busted..\"\n\n# Return the advice\n print(f\"You have {card_value}, {advice}\")\n","repo_name":"PdxCodeGuild/class_armadillo","sub_path":"Code/Talieson/python/Lab10-black_jack_advice.py","file_name":"Lab10-black_jack_advice.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"}
+{"seq_id":"14995585778","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 28 10:42:29 2021\n\n@author: dayman\n\"\"\"\n\n\n#TODO\n# Fix the program fucking up abbreviations for relations with duplicate words\n# Docs is none vs docs = None\n # Check why is important\n\n# Place all of main in a new function\n# Consider making regular argparse version... (With normal argument parsing)\n#\n\nfrom abbrev_gen import load_docs, create_bag_of_words, count_unique_words, get_LSA, generate_embedding, get_sims\nfrom abbrev_gen import generate_abbrev\nimport argparse\nimport numpy as np\n\ndef list_options():\n print('1: Load new dataset')\n print('2: Generate embedding based on loaded dataset')\n print('3: Generate abbreviation based on embedding')\n \ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\nparser = argparse.ArgumentParser(description = 'Getting that sweet PENIS')\n\n\n\nparser.add_argument('-l', '--list', help='Lists all options of LSA',\n action='store_true', default=False)\nparser.add_argument('-s', '--simple', type=str2bool, nargs='?',\n const=True, default=True,\n help=\"Activate simple mode.\")\nparser.add_argument('-v', '--verbosity', type=str2bool, nargs='?',\n const=True, default=False,\n help=\"Increase output verbosity.\")\n\nparser.add_argument('-ld', '--load_dataset', type=str, \n help='Loads a new dataset')\nparser.add_argument('-ge','--generate_embedding', type=int, \n help = 'Generate an LSA embedding based on loaded dataset')\nparser.add_argument('-cr', '--create_relation', nargs =2 + 5,\n help = 'Generate abbreviations based on desired abbreviation and relation') # TODO the amount of arguments expected dynamic with the relation string\n\nparser.add_argument('--test', action='store_true', help='simple testing function')\n\nargs = parser.parse_args()\n\n\ndef main():\n no_docs = no_embedding = False\n P_word = P_doc = vocab = BoW = docs = None \n print('Welcome to abbrev_gen V0.6')\n print(\"\"\"\n -----------------------\n -----------------------\n -----------------------\n --Ascii Art goes here--\n -----------------------\n -----------------------\n -----------------------\"\"\")\n\n \n while True:\n astr = input('$: ')\n \n try:\n args = parser.parse_args(astr.split()) # Parse args in while true because it is better\n except:\n print('Error when parsing argument,', astr.split()[0])\n continue\n \n \n if args.list:\n list_options()\n args.list = False\n \n if args.load_dataset is not None:\n docs = load_docs(args.load_dataset)\n args.load_dataset = False\n \n if args.generate_embedding is not None:\n if docs == None:\n print('No dataset loaded, loading default')\n docs = load_docs('articles.json', 500)\n \n P_word, P_doc, vocab, BoW = generate_embedding(docs)\n args.generate_embedding = None\n \n if args.create_relation is not None:\n \n if args.simple and docs == None:\n print('Loading default document set...')\n docs = load_docs('articles.json', 500, verbose=args.verbosity)\n \n if args.simple and None in (P_word, P_doc, vocab, BoW):\n print('Generating default embedding...')\n P_word, P_doc, vocab, BoW = generate_embedding(docs, verbose=args.verbosity)\n\n sing_vals = int(args.create_relation[0])\n abbrev = args.create_relation[1]\n relation = ' '.join(args.create_relation[2:]) # Probably stupid to join them here, but abbrev_gen 
accepts relations as strings, not list\n print(relation)\n \n if len(list(abbrev)) != len(relation): # TODO, make it break out of if statment here to preserve program\n print(\"Your relation must contain as many words as your abbreviation!\")\n \n if sing_vals == 0:\n print(P_word.shape)\n sing_vals = P_word.shape[1]\n \n \n sims = get_sims(abbrev, relation, vocab, BoW@P_word[:, 0:sing_vals])\n generate_abbrev(relation, sims)\n \n args.create_relation = None\n\n if args.test:\n args.test = False\n \n sing_vals = 0\n abbrev = 'penis'\n relation = 'der så en som er'\n\n if args.simple and docs == None:\n print('Loading default document set...')\n docs = load_docs('articles.json', 500, verbose=args.verbosity)\n \n if args.simple and None in (P_word, P_doc, vocab, BoW):\n print('Generating default embedding...')\n P_word, P_doc, vocab, BoW = generate_embedding(docs, verbose=args.verbosity)\n\n if sing_vals == 0:\n print(P_word.shape)\n sing_vals = P_word.shape[1]\n \n sims = get_sims(abbrev, relation, vocab, BoW@P_word[:, 0:sing_vals])\n \n k = 10\n s = 0\n print(sims[s:k], '\\n')\n\n while True:\n\n inp = input('For more abbreviations, press y, to end, press n')\n \n if 'n' in inp.lower():\n break\n elif 'y' in inp.lower():\n k += 10\n s += 10\n print(sims[s:k], '\\n')\n else:\n print('Please choose a valid option')\n \n \n #sims, axe = get_sims(abbrev, relation, vocab, BoW@P_word[:, 0:sing_vals])\n #generate_abbrev(relation, sims)\n\n \n# =============================================================================\n# if docs == None: # TODO, make it break out of if statment here to preserve program\n# print('Sorry, there are no documents loaded, load a document please')\n# print('Otherwise, turn simple mode on')\n# no_docs = True\n# #args.create_relation = None\n# \n# if None in (P_word, P_doc, vocab, BoW): # TODO, make it break out of if statment here to preserve program\n# print('Sorry, you have an invalid embedding, please generate a valid embedding to continue')\n# print('Otherwise, turn simple mode on')\n# no_embedding = True\n# #args.create_relation = None\n# =============================================================================\n \n \n\n# =============================================================================\n# def main():\n# docs = None \n# print('Welcome to abbrev_gen V0.6')\n# print(\"\"\"\n# -----------------------\n# -----------------------\n# -----------------------\n# --Ascii Art goes here--\n# -----------------------\n# -----------------------\n# -----------------------\"\"\")\n# \n# P_word = None\n# while True:\n# list_options()\n# \n# inp = input('...') \n# \n# if inp == '1':\n# print('enter path of dataset to load \\n')\n# path = input()\n# \n# \n# print('Enter number of documents of the dataset you want to use, leave blank for all')\n# no_docs = input()\n# \n# # Add options for stripping and trimming...\n# \n# docs = load_docs(path, no_docs)\n# \n# elif inp == '2':\n# # TODO, make dataset load automatically\n# if docs == None:\n# print('Error: No dataset loaded, loading default')\n# docs = load_docs('articles.json', 500)\n# \n# P_word, P_doc, vocab, BoW = generate_embedding(docs, 0)\n# \n# elif inp == '3':\n# \n# if P_word == None:\n# print('error, please generate a valid embedding first')\n# return\n# relation = input(' Please enter relation string of words seperated by whitespace \\n')\n# abbrev = input(f\"Enter desired abbreviation, it should be {len(relation.split(' '))} letters long \\n\")\n# \n# sims = get_sims(abbrev, relation, vocab, BoW@P_word)\n# 
\n# for i, word in enumerate(list(abbrev)):\n# print(sims[relation.split(' ')[i]][:10])\n# \n# if sims == None:\n# print('It appears there was a mistake, please try again')\n# continue\n# \n# generate_abbrev(relation, sims)\n# \n# =============================================================================\n#parser.add_argument('-s', '--simple', help='Turns simple mode on', \n# action='store_bool', const=False)\n \n \n# =============================================================================\n# print(\"\"\"\n# -----------------------\n# -----------------------\n# -------------( )-------\n# -----------( )------\n# ----------( )-----\n# ---------(__ __)-----\n# -----------( )------\n# -----------( )------\n# -----------( )------\n# --------#--( )--#----\n# ------#--#______#-#-----\n# --------#---------#----\"\"\")\n# =============================================================================\n \nmain()","repo_name":"TheGoldenChicken/abbrev-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"6021918020","text":"produto = str\nprodutocaro = str\nprodutocarov = 0\n\nwhile produto != 'XXX' or produto != 'xxx':\n print('-'*40)\n produto = input('Nome do Produto:')\n if produto == 'XXX' or produto == 'xxx':\n break\n \n vproduto = float(input('Valor do Produto em R$:'))\n\n if vproduto > produtocarov:\n produtocaro = produto\n produtocarov = vproduto\n\n\nprint('Produto mais caro:',produtocaro,', com valor de R$',produtocarov)\n \n \n\n\n","repo_name":"NicolausBR/UERJ-CC2022","sub_path":"Exercícios Lista 1/L.4.Maior Preço no mercado.py","file_name":"L.4.Maior Preço no mercado.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"29819976794","text":"# eyevator 上传数据 2020-04-11\n# encoding:utf-8\nimport time\nimport os\nimport sys\nimport oss2\nimport configparser as cfg\n\n\ndef read_config_file(config_path): #读取配置文件的一些内容\n config = cfg.ConfigParser()\n config.read(config_path)\n img_save_path_first = config['DEFAULT']['img_save_path_first']\n projectname = config['DEVICEINFO']['projectname']\n floor = config['DEVICEINFO']['floor']\n unit = config['DEVICEINFO']['unit']\n name = projectname+\"/\"+floor+\"-\"+unit\n \n return img_save_path_first,name\n\ndef get_time():\n local_time = time.localtime()\n formate_time = time.strftime(\"%Y-%m-%d\",local_time)\n return formate_time\n\ndef up_down_file_oss(bucket,yourObjectName, yourLocalFile,up=True): #将数据发送至云端,并显示上传进度\n def percentage(consumed_bytes, total_bytes):\n if total_bytes:\n rate = int(100 * (float(consumed_bytes) / float(total_bytes)))\n print('\\r{0}% '.format(rate), end='')\n\n sys.stdout.flush()\n if up: # 上传文件\n \n bucket.put_object_from_file(yourObjectName, yourLocalFile,progress_callback=percentage)\n if bucket.object_exists(yourObjectName):\n time.sleep(1)\n os.remove(yourLocalFile)\n print(\"uploading img %s success.\" % yourObjectName)\n else:\n print(\"uploading img error!!\")\n else: # 下载文件\n \n bucket.get_object_to_file(yourObjectName, yourLocalFile,progress_callback=percentage)\n print(\"downloading img success.\")\n\ndef create_space(bucket,service,name=None,delete=False): #创建一个桶,用来存放数据,如果桶已经存在,则不创建\n print([b.name for b in oss2.BucketIterator(service)]) # 列出所有存在的桶\n def read_object(delete_obj=False): # oss2.ObjectIteratorr用于遍历文件。遍历桶内每个文件\n for b in islice(oss2.ObjectIterator(bucket), 1, None):\n print(b.key)\n if delete_obj:\n bucket.delete_object(b.key)\n print(\"delete %s success\" % b.key)\n def does_bucket_exist(bucket):#判断存储空间是否存在\n try:\n bucket.get_bucket_info()\n except oss2.exceptions.NoSuchBucket:\n return False\n except:\n raise\n return True\n if delete:\n try:\n # 删除存储空间。\n bucket.delete_bucket()\n except oss2.exceptions.BucketNotEmpty:\n print('bucket is not empty.')\n except oss2.exceptions.NoSuchBucket:\n print('bucket does not exist')\n else:\n if does_bucket_exist(bucket): # 如果存在此桶,则不创建存储空间\n print(\"sorry bucket %s has exist.\" % name)\n else: # 不存在则创建\n bucket.create_bucket()\n print(\"creat bucket %s success.\" % name)\n\n\n\ndef slect_file(file_path): #用来检查文件是否有图像,并删除储存不完全的数据\n file_list = os.listdir(file_path)\n file_name = None\n file_size = None\n for file_name in file_list:\n file_size = os.stat(os.path.join(file_path, file_name)).st_size\n if file_size > 10000:\n break\n elif value and file_size < 100:\n os.remove(os.path.join(file_path, file_name))\n else:\n file_name = None\n return file_name, file_size\n\n\ndef main(): #我这里是用的阿里云OSS进行的云端数据存储,其账号和密码在购买后便知道\n # 阿里云主账号AccessKey拥有所有API的访问权限,风险很高。强烈建议您创建并使用RAM账号进行API访问或日常运维,请登录 https://ram.console.aliyun.com 创建RAM账号。\n auth = oss2.Auth('', '')\n # 通过指定Endpoint和存储空间名称,您可以在指定的地域创建新的存储空间。Endpoint以杭州为例,其它Region请按实际情况填写。\n bucket = oss2.Bucket(auth, 'http://oss-cn-hangzhou.aliyuncs.com', '')\n # 上传文件到OSS时需要指定包含文件后缀在内的完整路径,例如abc/efg/123.jpg。\n # 由本地文件路径加文件名包括后缀组成,例如/users/local/myfile.txt。\n service = oss2.Service(auth, 'http://oss-cn-beijing.aliyuncs.com') #查看存储空间\n create_space(bucket,service,name=\"all-waring-img\")\n while True:\n try:\n name,_ = slect_file(img_save_path_first)\n if name:\n time.sleep(10)\n up_path = os.path.join(\"all-waring-img\",projectname,get_time(),name)\n up_down_file_oss(bucket,up_path,os.path.join(img_save_path_first,name))\n else:\n 
print(\"folder is empty!!\")\n time.sleep(2)\n except BaseException as e:\n print(\"error:\",e)\n time.sleep(2)\n\n\nif __name__ == \"__main__\":\n path = \"/home/pi/tools/darknet2ncnn/model_data/test.txt\" #这是我配置文件地址\n img_save_path_first,projectname = read_config_file(path)\n time.sleep(10) ##为了防止设备联网不及时,先等待一段时间\n while True: #如果联网不成功,继续尝试,直至链接成功\n try:\n main()\n except BaseException as e:\n print(\"Network error:\",e)\n time.sleep(2)\n","repo_name":"strivehub/Internet-of-things","sub_path":"uploading_data.py","file_name":"uploading_data.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"40094321591","text":"class Solution:\n def myAtoi(self, str: str) -> int:\n str = str.strip()\n if len(str) == 0 or (len(str) == 1 and not str.isdigit()):\n return 0\n elif str[0] in ['+', '-'] or str[0].isdigit():\n sign = -1 if str[0] == '-' else 1\n num_digits = 0\n start = 1 if str[0] in ['+', '-'] else 0\n flag = False\n for i in range(start, len(str)):\n num_digits = i\n if not str[i].isdigit():\n flag = True\n break\n if not flag:\n num_digits += 1\n value = 0\n for i in range(num_digits - 1, start - 1, -1):\n value += int(str[i]) * 10**(num_digits - i - 1)\n max_int = 2**31\n value *= sign\n if value > max_int - 1:\n return max_int - 1\n elif value < -max_int:\n return -max_int\n return value\n return 0\n","repo_name":"dawnonme/Eureka","sub_path":"main/leetcode/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"29155476770","text":"import numpy as np\r\nfrom pymongo import MongoClient\r\nimport redis\r\n\r\ndata_ip = \"162.105.89.243\"\r\nr = redis.Redis(host=data_ip)\r\n\r\nusers = list(r.zrevrange('round:97:scoreboard', 0, 10, withscores=True))\r\n\r\nwith open('round_97_score_detail.csv', 'w') as f:\r\n\tf.write(\"rank,player,score,create_wrong_link,create_correct_link,remove_correct_link,remove_wrong_link,remove_hinted_wrong_link\\n\")\r\n\tfor i in range(len(users)):\r\n\t\tusername, score = users[i]\r\n\t\tcreate_wrong_link = r.zscore('round:97:scoreboard:create_wrong_link', username)\r\n\t\tcreate_correct_link = r.zscore('round:97:scoreboard:create_correct_link', username)\r\n\t\tremove_correct_link = r.zscore('round:97:scoreboard:remove_correct_link', username)\r\n\t\tremove_wrong_link = r.zscore('round:97:scoreboard:remove_wrong_link', username)\r\n\t\tremove_hinted_wrong_link = r.zscore('round:97:scoreboard:remove_hinted_wrong_link', username)\r\n\t\tcreate_wrong_link = create_wrong_link if create_wrong_link else 0\r\n\t\tcreate_correct_link = create_correct_link if create_correct_link else 0\r\n\t\tremove_correct_link = remove_correct_link if remove_correct_link else 0\r\n\t\tremove_wrong_link = remove_wrong_link if remove_wrong_link else 0\r\n\t\tremove_hinted_wrong_link = remove_hinted_wrong_link if remove_hinted_wrong_link else 0\r\n\t\tf.write(\"%d,%s,%d,%d,%d,%d,%d,%d\\n\" % (i/2, username, score, create_wrong_link, \r\n\t\t\tcreate_correct_link, remove_correct_link,remove_wrong_link,remove_hinted_wrong_link))\r\n\r\n","repo_name":"TsukimiRini/CrowdJigsaw","sub_path":"dbscripts/round_score_detail.py","file_name":"round_score_detail.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"28"}
+{"seq_id":"31228832018","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.UserFormView.as_view(), name='login'),\n url(r'^index/$', views.IndexView.as_view(), name='index'),\n url(r'^analysis/$', views.AnalyseView.as_view(), name='analysis'),\n url(r'^download/(?P\\w+\\s{1,1}\\w+)/(?P[A-Z]\\d{4,4}-\\d{5,5}-\\d{5,5})/$', views.DownloadView.as_view(), name='download'),\n url(r'^test/$', views.PdfView.as_view(), name='pdf'),\n]\n\n\n","repo_name":"wuyiaishang/Driver-License-OCR","sub_path":"ocr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"28"}
+{"seq_id":"3835972989","text":"\"\"\"\r\nExercício de listas em Pyhton:\r\nUm professor solicitou um programa para saber a maior e menor nota em uma turma de 10 alunos.\r\nAs notas de 0 a 10 serão digitadas pelo teclado, verificadas e, ao final, os valores da maior e menor nota deverão\r\naparecer na tela do computador.\r\n\"\"\"\r\nnotas = []\r\n\r\ndef lista_notas(notas):\r\n nota = int(input('digite a nota: '))\r\n notas.append(nota)\r\n\r\nwhile len(notas) < 10:\r\n lista_notas(notas)\r\n print(notas)\r\n\r\nmenor = 10\r\nmaior = 0\r\n\r\nfor i in range(len(notas)):\r\n if maior < notas[i]:\r\n maior = notas[i]\r\n if menor > notas[i]:\r\n menor = notas[i]\r\n print('A maior nota é:', maior)\r\n\r\nprint(f'a maior nota da turma foi {menor} e a menor nota da turma foi {maior}')","repo_name":"raquelmrodrigues/raciocinio-computacional","sub_path":"atividade-7.py","file_name":"atividade-7.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20122472911","text":"group_size = int(input())\ndays_of_the_adventure = int(input())\ncoins = 0\ncompanions = 0\ncompanions += group_size\nfor days in range(1, days_of_the_adventure + 1):\n if days % 10 == 0:\n companions -= 2\n if days % 15 == 0:\n companions += 5\n if days % 3 == 0:\n coins -= companions * 3\n if days % 5 == 0:\n coins += 20 * companions\n if days % 3 == 0:\n coins -= companions * 2\n coins += 50\n coins -= companions * 2\nresult = int(coins / companions)\nprint(f\"{companions} companions received {result} coins each.\")\n","repo_name":"skeli74/python_fundamentals","sub_path":"data_types_and_variables/data_types_and_variables_lab_exercise/party_profit.py","file_name":"party_profit.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"42165301316","text":"import pygame\n\n\npygame.init()\n\nWINDOW_WIDTH = 600\nWINDOW_HEIGHT = 300\ndisplay_surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\npygame.display.set_caption('Movement')\n\nVELOCITY = 10\n\ndragon_img = pygame. transform. scale(\n pygame.image.load('./assets/pics/dragon.png'),\n (50, 50)\n)\ndragon_rect = dragon_img.get_rect()\ndragon_rect.centerx = WINDOW_WIDTH / 2\ndragon_rect.bottom = WINDOW_HEIGHT - 10\n\nrunning=True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n dragon_rect.x -= VELOCITY\n elif event.key == pygame.K_RIGHT:\n dragon_rect.x += VELOCITY\n elif event.key == pygame.K_UP:\n dragon_rect.y -= VELOCITY\n elif event.key == pygame.K_DOWN:\n dragon_rect.y += VELOCITY\n\n \n display_surface.fill((0,0,0))\n display_surface.blit(dragon_img, dragon_rect)\n pygame.display.update()\n\n\npygame.quit()","repo_name":"kamalfarahani/Pygame_examples","sub_path":"basics/6_keyboard_movement.py","file_name":"6_keyboard_movement.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"19052086944","text":"def play_35():\n\ts=input()\n\tl= l=s.split(' ')\n\tr,max,c=[],-1,0\n\tfor i in l:\n\t\tfor j in range(len(i)):\n\t\t\tfor k in range(j+1,len(i)):\n\t\t\t\tif i[j]==i[k]:\n\t\t\t\t\tc+=1\n\t\t\tif max int:\n r = table.rowCount()\n table.setRowCount(r + 1)\n\n st = QLabel(\"WAITING\")\n st.setAlignment(Qt.AlignCenter)\n table.setCellWidget(r, 0, st)\n table.setCellWidget(r, 1, QComboBox())\n table.setCellWidget(r, 2, QComboBox())\n\n for c in range(3, 6):\n table.setItem(r, c, QTableWidgetItem(\"\"))\n\n dp = QDateEdit(calendarPopup=True)\n dp.setDisplayFormat(\"dd MMM yyyy\")\n table.setCellWidget(r, 6, dp)\n\n for c in range(7, 9):\n table.setItem(r, c, QTableWidgetItem(\"\"))\n\n kp = QComboBox()\n kp.addItems(['Anggota', 'PR', 'PK', 'PAC',\n 'PKPT', 'PC', 'PW', 'PP'])\n table.setCellWidget(r, 9, kp)\n\n jb = QComboBox()\n jb.addItem('Anggota')\n jb.addItem('Ketua')\n jb.addItem('Wakil Ketua')\n jb.addItem('Sekretaris')\n jb.addItem('Wakil Sekretaris')\n jb.addItem('Bendahara')\n jb.addItem('Wakil Bendahara')\n jb.addItem('Departemen Organisasi')\n jb.addItem('Departemen Kaderisasi')\n jb.addItem('Departemen Jaringan Sekolah dan Pesantren')\n jb.addItem('Departemen Dakwah')\n jb.addItem('Departemen Seni Budaya dan Olahraga')\n jb.addItem('Departemen Jaringan Komunikasi dan Informatika')\n jb.addItem('Lembaga Corp Brigade Pembangunan')\n jb.addItem('Lembaga Ekonomi Koperasi dan Kewirausahaan')\n jb.addItem('Lembaga Pers dan Penerbitan')\n jb.addItem('Lembaga Anti Narkoba')\n jb.addItem('Lembaga Komunikasi Perguruan Tinggi')\n jb.addItem('Lembaga Advokasi dan Kebijakan Publik')\n jb.addItem('Badan Student Research Center')\n jb.addItem('Badan Student Crisis Center')\n table.setCellWidget(r, 10, jb)\n\n pf = QComboBox()\n pf.addItems(['makesta', 'lakmud', 'lakut'])\n table.setCellWidget(r, 11, pf)\n\n for c in [12, 16, 20]:\n op = QComboBox()\n op.addItems(['sudah', 'belum'])\n op.setCurrentIndex(1)\n table.setCellWidget(r, c, op)\n\n for c in [13, 14, 15, 17, 18, 19, 21, 22, 23, 25, 26]:\n table.setItem(r, c, QTableWidgetItem(\"\"))\n\n op = QComboBox()\n op.addItems(['ya', 'tidak'])\n op.setCurrentIndex(1)\n table.setCellWidget(r, 24, op)\n\n pnd = QComboBox()\n pnd.addItem('Tidak Ada')\n pnd.addItem('SD/Sederajat')\n pnd.addItem('SMP/Sederajat')\n pnd.addItem('SMA/Sederajat')\n pnd.addItem('D1')\n pnd.addItem('D2')\n pnd.addItem('D3')\n pnd.addItem('S1')\n pnd.addItem('S2')\n pnd.addItem('S3')\n table.setCellWidget(r, 27, pnd)\n\n for c in [13, 14, 17, 18, 19, 21, 22, 25, 26]:\n table.setItem(r, c, QTableWidgetItem(\"\"))\n\n for c in [15, 19, 23]:\n dp = QDateEdit(calendarPopup=True)\n dp.setDisplayFormat(\"dd MMM yyyy\")\n table.setCellWidget(r, c, dp)\n\n for c in range(28, 37):\n table.setItem(r, c, QTableWidgetItem(\"\"))\n\n st = QTableWidgetItem(\"\")\n st.setFlags(Qt.ItemFlag.ItemIsEnabled)\n table.setItem(r, 37, st)\n table.setColumnWidth(37, 250)\n\n fileBtn = QPushButton(\"Pilih Foto\", table)\n fileBtn.clicked.connect(\n lambda state, table=table, row=r: self._selectFile(state, row, table))\n table.setCellWidget(r, 38, fileBtn)\n\n return r\n\n def removeRow(self, table: QTableWidget):\n r = table.currentRow()\n table.removeRow(r)\n\n def fillRow(self, data, table, fillData):\n r, rows = data\n\n st = QLabel(\"WAITING\")\n st.setAlignment(Qt.AlignCenter)\n table.setCellWidget(r, 0, st)\n table.setCellWidget(r, 1, QComboBox())\n table.setCellWidget(r, 2, QComboBox())\n\n fillData(r)\n\n file_str = QTableWidgetItem(\"\")\n 
file_str.setFlags(Qt.ItemFlag.ItemIsEnabled)\n table.setItem(r, 37, file_str)\n\n fileBtn = QPushButton(\"Pilih Foto\", table)\n fileBtn.clicked.connect(\n lambda state, r=r, table=table: self._selectFile(state, r, table))\n table.setCellWidget(r, 38, fileBtn)\n\n for c, value in enumerate(rows):\n is_date, output = self._extractDate(value)\n if is_date or c in [16, 20]:\n dp = QDateEdit(calendarPopup=True)\n if isinstance(output, datetime):\n dp.setDate(output.date())\n dp.setDisplayFormat(\"dd MMM yyyy\")\n table.setCellWidget(r, c+3, dp)\n elif c == 6:\n kp = QComboBox()\n kp.addItems(['Anggota', 'PR', 'PK', 'PAC',\n 'PKPT', 'PC', 'PW', 'PP'])\n kp.setCurrentText(value)\n table.setCellWidget(r, c+3, kp)\n elif c == 7:\n jb = QComboBox()\n jb.addItem('Anggota')\n jb.addItem('Ketua')\n jb.addItem('Wakil Ketua')\n jb.addItem('Sekretaris')\n jb.addItem('Wakil Sekretaris')\n jb.addItem('Bendahara')\n jb.addItem('Wakil Bendahara')\n jb.addItem('Departemen Organisasi')\n jb.addItem('Departemen Kaderisasi')\n jb.addItem('Departemen Jaringan Sekolah dan Pesantren')\n jb.addItem('Departemen Dakwah')\n jb.addItem('Departemen Seni Budaya dan Olahraga')\n jb.addItem(\n 'Departemen Jaringan Komunikasi dan Informatika')\n jb.addItem('Lembaga Corp Brigade Pembangunan')\n jb.addItem('Lembaga Ekonomi Koperasi dan Kewirausahaan')\n jb.addItem('Lembaga Pers dan Penerbitan')\n jb.addItem('Lembaga Anti Narkoba')\n jb.addItem('Lembaga Komunikasi Perguruan Tinggi')\n jb.addItem('Lembaga Advokasi dan Kebijakan Publik')\n jb.addItem('Badan Student Research Center')\n jb.addItem('Badan Student Crisis Center')\n jb.setCurrentText(value)\n table.setCellWidget(r, c+3, jb)\n elif c == 8:\n pf = QComboBox()\n pf.addItems(['makesta', 'lakmud', 'lakut'])\n pf.setCurrentText(value.lower())\n table.setCellWidget(r, c+3, pf)\n elif c in [9, 13, 17]:\n op = QComboBox()\n op.addItems(['sudah', 'belum'])\n op.setCurrentText(value.lower())\n table.setCellWidget(r, c+3, op)\n elif c == 21:\n op = QComboBox()\n op.addItems(['ya', 'tidak'])\n op.setCurrentText(value.lower())\n table.setCellWidget(r, c+3, op)\n elif c == 24:\n pnd = QComboBox()\n pnd.addItem('Tidak Ada')\n pnd.addItem('SD/Sederajat')\n pnd.addItem('SMP/Sederajat')\n pnd.addItem('SMA/Sederajat')\n pnd.addItem('D1')\n pnd.addItem('D2')\n pnd.addItem('D3')\n pnd.addItem('S1')\n pnd.addItem('S2')\n pnd.addItem('S3')\n pnd.setCurrentText(value)\n table.setCellWidget(r, c+3, pnd)\n else:\n table.setItem(r, c+3, QTableWidgetItem(value))\n\n for col in [16, 20]:\n target = table.cellWidget(r, col)\n dp = table.cellWidget(r, col+3)\n dp.setDisabled(target.currentIndex() == 1)\n target.currentIndexChanged.connect(\n lambda idx, dp=dp: dp.setDisabled(idx == 1))\n\n def _extractDate(self, input):\n try:\n return True, datetime.strptime(input, '%Y-%m-%d')\n except:\n return False, input\n\n def _selectFile(self, state, r: int, table: QTableWidget):\n self.file_path, _ = QFileDialog.getOpenFileName(\n table, \"Pilih Foto Anggota\", \"\", \"Image File (*.jpg)\")\n\n if self.file_path:\n table.item(r, 37).setText(self.file_path)\n","repo_name":"fatkhur1960/sipadu-contonge","sub_path":"app/utils/table_util.py","file_name":"table_util.py","file_ext":"py","file_size_in_byte":8374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"69806654155","text":"# Example :- First write and then read operation.\n# Pre-existing text in the file is as follows :\n\"\"\"\nThis is pre-existing data in write-and-read-first file.This is second sentence of pre-existing data.\n\"\"\"\nf = open(\"files/write-and-read-first.txt\", \"w+\")\nf.write(\"\\nThis is a simple text for testing purpose.\")\nprint(\"First read : \", f.read())\n# Above read operation won't read anything as in this case pointer is positioned to the end of the file -\n# after performing write operation.And there is no text after that position.\n# So to actually get the data, we have to reposition the pointer at the beginning of the file.\nf.seek(0)\nprint(\"Second read : \", f.read())\n\n\"\"\"\nNote :-\nIf we perform read operation first then it will not produce any result and the operation will be useless\nas the pre-existing data is deleted while opening the file itself\nand in that case pointer will also remain at the same position(i.e at the beginning of the file)\nSo if we perform write operation after that, the flow will same as the above example\nSo omitting example for such scenario.\n\"\"\"\n","repo_name":"aman-saurabh/python-basics","sub_path":"sec2-file-handling/part4_write_and_read_mode1.py","file_name":"part4_write_and_read_mode1.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20656112587","text":"from turtle import Screen\nfrom player import Player\nfrom scoreboard import ScoreBoard\nfrom car_manager import CarManager\nimport time\n\nscreen = Screen()\nscreen.setup(width=600,height=600)\nscreen.bgcolor(\"black\")\nscreen.tracer(0)\nscreen.listen()\n\n\nplayer = Player()\ncarmanager = CarManager()\nscoreboard = ScoreBoard()\n\nscreen.onkeypress(player.move,'w')\n\ngame_on = True\nwhile game_on:\n screen.update()\n time.sleep(0.1)\n carmanager.random_car()\n carmanager.obstacle()\n if player.ycor() > 260:\n scoreboard.update_score()\n player.reset_position()\n carmanager.increase_speed()\n for car in carmanager.cars:\n if car.distance(player) < 15:\n scoreboard.game_over()\n game_on = False\n\n\nscreen.exitonclick()","repo_name":"killerbeesama/the-turtle-crossing-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"33395799173","text":"import torch\n\nimport lavis\n\nclass MMCls(torch.nn.Module):\n def __init__(self, args, device):\n super(MMCls, self).__init__()\n\n self.args = args\n\n if args.model == 'blip':\n name = 'blip_feature_extractor'\n elif args.model == 'albef':\n name = 'albef_feature_extractor'\n else:\n raise NotImplementedError\n\n self.model, self.vis_processors, self.txt_processors = lavis.models.load_model_and_preprocess(name=name, model_type='base', is_eval=False, device=device)\n\n self.cls_head = torch.nn.Linear(768, 1)\n \n self.to(device)\n\n def forward(self, inputs):\n \n if self.args.text_only:\n inputs = {'text_input': inputs['text_input'], 'image': None}\n model_out = self.model.extract_features(inputs, mode='text')\n embs = model_out['text_embeds']\n elif self.args.image_only:\n inputs = {'image': inputs['image'], 'text_input': None}\n model_out = self.model.extract_features(inputs, mode='image')\n embs = model_out['image_embeds']\n else:\n model_out = self.model.extract_features(inputs)\n embs = model_out['multimodal_embeds']\n\n if self.args.pooling == '':\n # Slice off the first emb dim\n cls_in = embs[:, 0, :]\n elif self.args.pooling == 'mean':\n cls_in = torch.mean(embs, dim=1)\n else:\n raise NotImplementedError\n\n out = self.cls_head(cls_in)\n \n return out\n \n def get_processors(self):\n return self.vis_processors['eval'], self.txt_processors['eval']","repo_name":"i-need-sleep/checkthat","sub_path":"code/models/mm_classifier.py","file_name":"mm_classifier.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"38277487350","text":"import csv\nimport os\n\n# i would like to eventually get this using a web request... but is it worth it? I don't think the archive is being updated\nFILE_NAME = os.path.dirname(os.path.dirname(__file__)) + '/csv/exoplanetData.csv'\n\n# found out how to use the api... it's slow\n# docs: https://exoplanetarchive.ipac.caltech.edu/docs/TAP/usingTAP.html\n# more docs: https://exoplanetarchive.ipac.caltech.edu/docs/program_interfaces.html\nQUERY_URL = \"https://exoplanetarchive.ipac.caltech.edu/TAP/sync?query=select+pl_name,hostname,disc_year,discoverymethod,pl_orbper,pl_rade,pl_masse,pl_bmasse,pl_dens,sy_dist,st_metratio,pl_refname,st_refname,dec+from+ps&format=csv\"\n\ndef getSortedColumnName():\n \"Returns dictionary where key pl_name = list of planet data\"\n column_names = None\n data = {}\n\n with open(FILE_NAME) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if row[0][0] == '#':\n continue\n \n if line_count == 0:\n column_names = row\n\n for name in column_names:\n data[name] = []\n else:\n for i in range(len(row)):\n index = column_names[i]\n data[index].append(row[i])\n line_count += 1\n\n # with open('exoplanetsDict.py', 'w+') as newF:\n # newF.write('data=' + str(data))\n return data\n\ndef getSortedList():\n \"\"\"Returns list of planets, with no repetitions.\"\"\"\n column_names = None\n data = []\n used_names = []\n used_indices = []\n\n with open(FILE_NAME) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if row[0][0] == '#':\n continue\n \n if line_count == 0:\n column_names = row\n else:\n planetDict = {}\n used = False\n for i in range(len(row)):\n index = column_names[i]\n val = row[i]\n try:\n val = int(val)\n except:\n try:\n val = float(val)\n except:\n pass\n \n if index=='loc_rowid':\n val = len(data)\n if index == 'pl_name':\n if val in used_names:\n # need to combine values\n used = True\n else:\n used_names.append(val)\n planetDict[index] = val\n if used:\n # for index in range(len(data)):\n index = used_names.index(planetDict['pl_name'])\n if data[index]['pl_name'] == planetDict['pl_name']:\n existingData = data[index]\n for key, val in planetDict.items():\n if key != \"loc_rowid\" and existingData[key] != planetDict[key]:\n existingData[key] = planetDict[key]\n else:\n data.append(planetDict)\n line_count += 1\n\n # with open('exoplanetsList.py', 'w+') as newF:\n # newF.write('data=' + str(data))\n return data\n\ndef getSystemPlanets(data):\n \"\"\"Returns a dictionary where key = hostname and value = list of planets from data which is a list of planet data\"\"\"\n hosts = {}\n for planet_id in range(len(data)):\n planet = data[planet_id]\n hostname = planet['hostname']\n if not hostname in hosts:\n hosts[hostname] = []\n hosts[hostname].append(planet_id)\n return hosts","repo_name":"finn-ellis/ExoWebApp","sub_path":"static/python/exoplanetDataHandler.py","file_name":"exoplanetDataHandler.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"72409803595","text":"import streamlit as st\nimport os\nimport pandas as pd\nfrom streamlit.hashing import _CodeHasher\n\ntry:\n # Before Streamlit 0.65\n from streamlit.ReportThread import get_report_ctx\n from streamlit.server.Server import Server\nexcept ModuleNotFoundError:\n # After Streamlit 0.65\n from streamlit.report_thread import get_report_ctx\n from streamlit.server.server import Server\n\n\ndef main():\n state = _get_state()\n pages = {\n \"Dashboard\": page_dashboard,\n \"Object Tracking\": page_object_tracking,\n \"Coverter para .Record\": page_converted_record,\n }\n\n st.sidebar.title(\":floppy_disk: Page states\")\n page = st.sidebar.radio(\"Select your page\", tuple(pages.keys()))\n\n # Display the selected page with the session state\n pages[page](state)\n\n # Mandatory to avoid rollbacks with widgets, must be called at the end of your app\n state.sync()\n\n\ndef page_dashboard(state):\n st.title(\"Informacoes iniciais para o usuario\")\n\ndef page_converted_record(state):\n st.title(\"Converter os arquivos CSV para RECORD\")\n\n if st.button('Converter'):\n try:\n os.system(\n 'python scripts/opencv_object_tracking.py \\\n --video {} \\\n --tracker {} \\\n --label {} \\\n --vocxml {} \\\n --desting train \\\n --imagesdirectory images \\\n --cropimages {} \\\n '.format(state.video, state.track, state.label,\n state.voc, state.cropImage))\n except:\n st.error(\"Error while running the script open_object_tracking.py\")\n raise Exception(\"Error while running the script open_object_tracking.py\")\n \n try:\n os.system(\n 'python scripts/random_samples.py --folder images --train_num {}'.format(state.data_train)\n )\n except:\n st.error(\"Error while running the script random_sample.py\")\n raise Exception(\"Error while running the script random_sample.py\")\n \n try:\n os.system('python scripts/xml_to_csv.py --input {} --output ./csvs/ --file {}'.format('train', 'train'))\n os.system('python scripts/xml_to_csv.py --input {} --output ./csvs/ --file {}'.format('test', 'teste'))\n except:\n st.error(\"Error while running the script xml_to_csv.py\")\n raise Exception(\"Error while running the script xml_to_csv.py\")\n\n st.success(\"Conversão realizada com sucesso!☺\")\n \n st.write(\"Dados de treino\")\n st.dataframe(pd.read_csv(\"./csvs/train_labels.csv\"), width=800, height=500)\n \n st.write(\"Dados de teste\")\n st.dataframe(pd.read_csv(\"./csvs/teste_labels.csv\"), width=800, height=500)\n\ndef page_object_tracking(state):\n st.title(\"Bem vindo ao Object tracking\")\n\n options = [\"csrt\", \"kcf\", \"boosting\", \"mil\", \"tld\", \"medianflow\", \"mosse\", \"goturn\"]\n state.track = st.selectbox(\"Selecione o Tipo de rastreador de objeto OpenCV prefereido\", options)\n state.video = st.text_input(\"Informe o diretório do video/imagem:\", state.video or '')\n state.label = st.text_input(\"Informe o rótulo do objeto:\", state.label or '')\n state.voc = st.checkbox(\"Eu quero gerar arquivos pascal VOC XML 📃\", state.voc)\n state.yolo = st.checkbox(\"Eu quero gerar arquivos yolo TXT 📄\", state.yolo)\n state.cropImage = st.checkbox(\"Eu desejo Recortar os objetos das imagens 🤙\", state.cropImage)\n if state.voc == True and state.yolo == True:\n st.error(\"Selecione apenas uma das Opcoes 😉\")\n\n state.data_train = st.slider(\"Dividindo os dados para treino e teste\", \n min_value=1, max_value=100, step=1)\n state.data_teste = 100 - state.data_train\n st.write(\"Dados para treino: {}%\".format(state.data_train))\n st.write(\"Dados para teste: 
{}%\".format(state.data_teste))\n\ndef display_state_values(state):\n st.write(\"Input state:\", state.video)\n st.write(\"Slider state:\", state.folder)\n st.write(\"Radio state:\", state.radio)\n st.write(\"Checkbox state:\", state.checkbox)\n st.write(\"Selectbox state:\", state.selectbox)\n st.write(\"Multiselect state:\", state.multiselect)\n \n for i in range(3):\n st.write(f\"Value {i}:\", state[f\"State value {i}\"])\n\n if st.button(\"Clear state\"):\n state.clear()\n\n\nclass _SessionState:\n\n def __init__(self, session, hash_funcs):\n \"\"\"Initialize SessionState instance.\"\"\"\n self.__dict__[\"_state\"] = {\n \"data\": {},\n \"hash\": None,\n \"hasher\": _CodeHasher(hash_funcs),\n \"is_rerun\": False,\n \"session\": session,\n }\n\n def __call__(self, **kwargs):\n \"\"\"Initialize state data once.\"\"\"\n for item, value in kwargs.items():\n if item not in self._state[\"data\"]:\n self._state[\"data\"][item] = value\n\n def __getitem__(self, item):\n \"\"\"Return a saved state value, None if item is undefined.\"\"\"\n return self._state[\"data\"].get(item, None)\n \n def __getattr__(self, item):\n \"\"\"Return a saved state value, None if item is undefined.\"\"\"\n return self._state[\"data\"].get(item, None)\n\n def __setitem__(self, item, value):\n \"\"\"Set state value.\"\"\"\n self._state[\"data\"][item] = value\n\n def __setattr__(self, item, value):\n \"\"\"Set state value.\"\"\"\n self._state[\"data\"][item] = value\n \n def clear(self):\n \"\"\"Clear session state and request a rerun.\"\"\"\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()\n \n def sync(self):\n \"\"\"Rerun the app with all state values up to date from the beginning to fix rollbacks.\"\"\"\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n \n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)\n\n\ndef _get_session():\n session_id = get_report_ctx().session_id\n session_info = Server.get_current()._get_session_info(session_id)\n\n if session_info is None:\n raise RuntimeError(\"Couldn't get your Streamlit Session object.\")\n \n return session_info.session\n\n\ndef _get_state(hash_funcs=None):\n session = _get_session()\n\n if not hasattr(session, \"_custom_session_state\"):\n session._custom_session_state = _SessionState(session, hash_funcs)\n\n return session._custom_session_state\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"angrycaptain19/auto-ml-tf","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"3890940363","text":"from datetime import date\nfrom OpenWeatherMapsApi import WeatherMapsApi\nfrom WeatherMapsController import WeatherMapsControl\nfrom TemptureConversion import ConvertTemperature\nfrom CityErrorValidation import ValidationOfCity\nfrom CityStateValidation import ValidatingState\nfrom flask import Flask , render_template ,request,jsonify, redirect, url_for,flash\n\n#DSC 510\n#Week 12\n#Final Project\n#Programming Assignment Week 12\n#Author Felipe Castillo\n#06/03/2021\n\n#Final Check List\n'''\n- You must show at a bare minimum the following weather information\nCurrent Temp: 47.35 degrees\nHigh Temp: 50 degrees\nLow Temp: 44.01 degrees\nPressure: 1019hPa\nHumidity: 40%\n\n- You must allow the user to enter in a city and a zip code\n- You must have a really good interface\n- You MUST NOT display temp in Kelvin (this is not readable for most users)\n- Think of weather.com and what level of data and how the interface looks. Make sure that it is usable\n- You MUST NOT print JSON data to the screen.\n- You must show the user the city they're requesting. If the user inputs 68138 you should say weather for Omaha NE.\n\nWorking Example\nWould you like to lookup weather data by US City or zip code? Enter 1 for US City 2 for zip: 1\nPlease enter the city name: omaha\nPlease enter the state abbreviation: ne\nWould you like to view temps in Fahrenheit, Celsius, or Kelvin.\nEnter 'F' for Fahrenheit, 'C' for Celsius, 'K' for Kelvin: f\n\nEXAMPLE OUT PUT\nCurrent Temp: 47.35 degrees\nHigh Temp: 50 degrees\nLow Temp: 44.01 degrees\nPressure: 1019hPa\nHumidity: 40%\nCloud Cover: Minimual Cloud Cover\nWould you like to perform another weather lookup? (Y/N)\n'''\n\n'''\nNotes to professor\nIf user doesnt not insert the correct city a suggestion will be provided\nIf city and state can be validate no abbrevation will be provided\nIf user doesnt provide a temp for the radio button it will set it to F as default\nIf user misspells or doesnt insert correct values errors are validated by ZIP and CITY\n'''\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = False\napp.config['SECRET_KEY'] = 'BASE_REPLAY'\nweather_data = []\n\n\ndef date_time_today():\n today = date.today()\n reformattedDate = today.strftime(\"%B-%d-%Y\")\n return reformattedDate\n\ndef cloud_percentage_scale_amount(weather_session):\n cloud_description = ''\n clouds = weather_session.weatherClouds\n if(clouds > 0 and clouds <= 25):\n cloud_description = 'Minimal Clouds'\n elif(clouds > 25 and clouds <= 50):\n cloud_description = 'Mid Minimal Clouds'\n elif(clouds > 50 and clouds <= 75):\n cloud_description = 'Higher Then Average Clouds'\n else:\n cloud_description = 'Very High Amount of Clouds'\n return cloud_description\n\n\ndef temperature_type_scale(temperatureType,temperature_scale,weather_session):\n regularTemperature = ''\n maxTemp = ''\n minTemp = ''\n symbol = ''\n if(len(temperatureType) != 0): # Checking to make sure the list is not empty\n if(temperatureType[0] == 'F'):\n regularTemperature = temperature_scale.kelvinToFahrenheit(weather_session.weatherTempature)\n maxTemp = temperature_scale.kelvinToFahrenheit(weather_session.weatherTemMax)\n minTemp = temperature_scale.kelvinToFahrenheit(weather_session.weatherTemMin)\n symbol = '°F'\n elif(temperatureType[0] == 'C'):\n regularTemperature = temperature_scale.kelvinToCelsius(weather_session.weatherTempature)\n maxTemp = temperature_scale.kelvinToCelsius(weather_session.weatherTemMax)\n minTemp = 
temperature_scale.kelvinToCelsius(weather_session.weatherTemMin)\n symbol = '°C'\n elif(temperatureType[0] == 'K'):\n regularTemperature = weather_session.weatherTempature\n maxTemp = weather_session.weatherTemMax\n minTemp = weather_session.weatherTemMin\n symbol = '°K'\n else: # If user doesnt choose a type Default is Fahrenherit\n regularTemperature = temperature_scale.kelvinToFahrenheit(weather_session.weatherTempature)\n maxTemp = temperature_scale.kelvinToFahrenheit(weather_session.weatherTemMax)\n minTemp = temperature_scale.kelvinToFahrenheit(weather_session.weatherTemMin)\n symbol = '°F'\n else: #when the program runs if prevoius data exists it poplates old data until new data is present bt default always Fahrenheit\n regularTemperature = temperature_scale.kelvinToFahrenheit(weather_session.weatherTempature)\n maxTemp = temperature_scale.kelvinToFahrenheit(weather_session.weatherTemMax)\n minTemp = temperature_scale.kelvinToFahrenheit(weather_session.weatherTemMin)\n symbol = '°F'\n\n return regularTemperature,maxTemp,minTemp,symbol\n\n\ndef conntect_api_weather(city):\n json_object = ''\n weatherRequest = WeatherMapsApi(city)\n if(city.isnumeric()):\n url = weatherRequest.weather_url_zipCode()\n json_object = weatherRequest.JsonData(url)\n else:\n url = weatherRequest.weather_url_word()\n json_object = weatherRequest.JsonData(url)\n return json_object\n\n\ndef city_validation(json_object):\n try:\n cityFlag = ValidationOfCity(json_object)\n status = cityFlag.status_code()\n return status\n except:\n print(\"Couldnt validate city!\")\n\n\ndef weather_call(city):# making sure api connected correctly\n flag_city = ''\n try:\n json_object = conntect_api_weather(city)\n cityFlag = city_validation(json_object) # making sure user inserts a correct city\n if(cityFlag == False):\n flag_city = False\n else:\n flag_city = True\n except:\n print(\"Failed To Find Data information\")\n return flag_city\n\n\n@app.errorhandler(500)\ndef server_issue(expecption):\n print(expecption)\n\n return render_template(\"500.html\")\n\n@app.errorhandler(404)\ndef server_issue(expecption):\n print(expecption)\n\n return render_template(\"404.html\")\n\n@app.route(\"/\")\ndef root():\n return redirect(url_for('Find_Weather'))\n\n\n@app.route(\"/Find_Weather\", methods=['GET','POST'])\ndef Find_Weather():\n try:\n city = ''\n weather = ''\n error = True\n abbrevation = ''\n temperatureType = ''\n todaysDate = date_time_today()\n if request.method == 'POST':\n city = request.form.get('city_Lookup')\n abbrevation = request.form.get('state_Lookup')\n temperatureType = request.form.getlist('option')\n weather = weather_call(city)\n if not weather: #validated city return a good status\n json_object = conntect_api_weather(city) #Felipe Castillo\n weather_session = WeatherMapsControl(json_object)\n\n if weather_session.weatherName != None:\n state_abbrevation_confirmed = ''\n verfied_state = ValidatingState()\n\n if(abbrevation != \"\"): #user provids a abbrevation, making sure user doesnt use an incorrect abbrevation\n\n state = verfied_state.abv_state_record(abbrevation)\n confirmed_state = verfied_state.state_with_abv_look_up(weather_session.weatherName, state) #validated abv belongs to state\n state_abbrevation_confirmed = verfied_state.state_abv_lookup(confirmed_state) # returning confirmed abv\n\n if(state_abbrevation_confirmed == \"\" and abbrevation!= \"\"): # if abbrevation is not validated it means it incorrect will look to find a suggestion, suggestion will be provided\n state = 
verfied_state.state_look_up(weather_session.weatherName)\n                        state_abbrevation_confirmed = verfied_state.state_abv_lookup(state) + \"-\" + \"Suggested\"\n\n\n                temperature_scale = ConvertTemperature() # Converting the temperature to the user's selection\n\n                temperatures_scale_Returned = temperature_type_scale(temperatureType,temperature_scale,weather_session) # get the temperature according to the user's choice\n                normalTemp = str(temperatures_scale_Returned).split(\",\")[0].replace(\"(\",\"\").strip()\n                maxTemp = str(temperatures_scale_Returned).split(\",\")[1].replace(\"(\", \"\").strip()\n                minTemp = str(temperatures_scale_Returned).split(\",\")[2].replace(\"(\", \"\").strip()\n                symbol = str(temperatures_scale_Returned).split(\",\")[3].replace(\")\", \"\").strip()\n\n                cloud_amount_sky = cloud_percentage_scale_amount(weather_session) # get a textual description of the cloud coverage from the percentage in the weather session object for this city\n\n\n                weatherInfo = {\n                    'city': weather_session.weatherName+\", \"+state_abbrevation_confirmed,\n                    'temperature': normalTemp+ \" \"+ symbol,\n                    'temperature_max': maxTemp+ \" \"+ symbol,\n                    'temperature_min': minTemp+\" \"+ symbol,\n                    'pressure': str(weather_session.weatherPressure)+\" hPa\",\n                    'humidity': str(weather_session.weatherHumidity)+ \"%\",\n                    'cloud_amount' : cloud_amount_sky,\n                    'icon': weather_session.weatherIcon\n                }\n                error = False\n                weather_data.append(weatherInfo)\n\n            else:\n                error = True\n                weatherInfo = {\n                    'city': \"\",\n                    'temperature': \"\",\n                    'temperature_max': \"\",\n                    'temperature_min': \"\",\n                    'pressure': \"\",\n                    'humidity': \"\",\n                    'cloud_amount':\"\",\n                    'icon': \"\"}\n                error_message = 'Error message'\n        if error:\n            errorType = ''\n            if city.isnumeric():\n                errorType = 'ZIP CODE'\n            else:\n                errorType = 'CITY'\n            flash(\"Please insert a correct \" + errorType + \"; this one is invalid. Please retype or check the spelling\", 'error')\n\n        return render_template(\"index.html\", weatherInfo=weatherInfo, todaysDate=todaysDate)\n    except:\n        print(\"Could not load website; possible 500 internal server error.\")\n\n\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"cast6858/portfolio","sub_path":"Weather App With Python and Flask/WeatherMapsMain.py","file_name":"WeatherMapsMain.py","file_ext":"py","file_size_in_byte":10086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
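`ConvertTemperature` is imported but not included in the record above; from its call sites (`kelvinToFahrenheit`, `kelvinToCelsius`) and the standard conversion formulas, it plausibly looks like the following sketch. The class body here is an assumption; only the method names come from the code above:

```python
class ConvertTemperature:
    """Kelvin-based conversions; OpenWeatherMap returns temperatures in Kelvin."""

    def kelvinToFahrenheit(self, kelvin):
        # F = (K - 273.15) * 9/5 + 32
        return round((float(kelvin) - 273.15) * 9 / 5 + 32, 2)

    def kelvinToCelsius(self, kelvin):
        # C = K - 273.15
        return round(float(kelvin) - 273.15, 2)
```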
+{"seq_id":"41691848031","text":"import numpy as np\nimport os\nimport random\nimport torch\n\n\ndef get_images(paths, labels, nb_samples=None, shuffle=True):\n \"\"\"\n Takes a set of character folders and labels and returns paths to image files\n paired with labels.\n Args:\n paths: A list of character folders\n labels: List or numpy array of same length as paths\n nb_samples: Number of images to retrieve per character\n Returns:\n List of (label, image_path) tuples\n \"\"\"\n if nb_samples is not None:\n sampler = lambda x: random.sample(x, nb_samples)\n else:\n sampler = lambda x: x\n images_labels = [(i, os.path.join(path, image))\n for i, path in zip(labels, paths)\n for image in sampler(os.listdir(path))]\n if shuffle:\n random.shuffle(images_labels)\n return images_labels\n\n\ndef image_file_to_array(filename, dim_input):\n \"\"\"\n Takes an image path and returns numpy array\n Args:\n filename: Image filename\n dim_input: Flattened shape of image\n Returns:\n 1 channel image\n \"\"\"\n import imageio\n image = imageio.imread(filename)\n image = image.reshape([dim_input])\n image = image.astype(np.float32) / 255.0\n image = 1.0 - image\n return image\n\n\nclass DataGenerator(object):\n \"\"\"\n Data Generator capable of generating batches of Omniglot data.\n A \"class\" is considered a class of omniglot digits.\n \"\"\"\n\n def __init__(self, num_classes, num_samples_per_class, config={}, device = torch.device('cpu')):\n \"\"\"\n Args:\n num_classes: int\n Number of classes for classification (N-way)\n \n num_samples_per_class: int\n Number of samples per class in the support set (K-shot).\n Will generate additional sample for the querry set.\n \n device: cuda.device: \n Device to allocate tensors to.\n \"\"\"\n self.num_samples_per_class = num_samples_per_class\n self.num_classes = num_classes\n\n data_folder = config.get('data_folder', './omniglot_resized')\n self.img_size = config.get('img_size', (28, 28))\n\n self.dim_input = np.prod(self.img_size)\n self.dim_output = self.num_classes\n\n character_folders = [os.path.join(data_folder, family, character)\n for family in os.listdir(data_folder)\n if os.path.isdir(os.path.join(data_folder, family))\n for character in os.listdir(os.path.join(data_folder, family))\n if os.path.isdir(os.path.join(data_folder, family, character))]\n\n random.seed(1)\n random.shuffle(character_folders)\n num_val = 100\n num_train = 1100\n self.metatrain_character_folders = character_folders[: num_train]\n self.metaval_character_folders = character_folders[\n num_train:num_train + num_val]\n self.metatest_character_folders = character_folders[\n num_train + num_val:]\n self.device = device\n\n def sample_batch(self, batch_type, batch_size):\n \"\"\"\n Samples a batch for training, validation, or testing\n Args:\n batch_type: str\n train/val/test set to sample from\n \n batch_size: int:\n Size of batch of tasks to sample\n \n Returns:\n images: tensor\n A tensor of images of size [B, K+1, N, 784]\n where B is batch size, K is number of samples per class, \n N is number of classes\n \n labels: tensor\n A tensor of images of size [B, K+1, N, N] \n where B is batch size, K is number of samples per class, \n N is number of classes\n \"\"\"\n if batch_type == \"train\":\n folders = self.metatrain_character_folders\n elif batch_type == \"val\":\n folders = self.metaval_character_folders\n else:\n folders = self.metatest_character_folders\n\n #############################\n #### YOUR CODE GOES HERE ####\n #############################\n\n # SOLUTION:\n\n 
classes = self.num_classes\n shots = self.num_samples_per_class\n shots -= 1\n\n images = torch.zeros(batch_size, shots + 1, classes, 784)\n labels = torch.zeros(batch_size, shots + 1, classes, classes)\n\n for task in range(batch_size):\n\n # N*N identity matrix.\n # in particular, each row is the one-hot vector wrt its class label.\n one_hots = np.identity(classes)\n # randomly select character sets (the N)\n chars = np.random.choice(folders, classes)\n # randomly select shots for each N (the K)\n dta_train = get_images(chars, one_hots, nb_samples=shots)\n # randomly select test, with mixed up label\n dta_test = get_images(chars, one_hots, nb_samples=1, shuffle=True)\n\n # control flow for populating query data in loop\n flag = False\n for shot in range(shots):\n for clss in range(classes):\n # fix a shot, iterate through the class representatives \n # for that shot and populate the training data.\n label, img = dta_train[clss*shots + shot]\n img = image_file_to_array(img, 28 * 28)\n images[task, shot, clss] = torch.from_numpy(img)\n labels[task, shot, clss] = torch.from_numpy(label)\n\n if not flag:\n # populate the query data.\n label, img = dta_test[clss]\n img = image_file_to_array(img, 28*28)\n # remember 0 indexing\n images[task, shots, clss] = torch.from_numpy(img) \n labels[task, shots, clss] = torch.from_numpy(label)\n \n # after first iteration of outer loop, \n # query data will be fully populated.\n flag = True\n\n return images, labels\n\n","repo_name":"akazachek/Stanford-CS-330","sub_path":"Assignment 1/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
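A short usage sketch for the generator above, checking the shapes the code actually produces (it assumes the default `./omniglot_resized` data folder exists, and that the module above is importable; note the constructor's `num_samples_per_class` already counts the query shot, so the second tensor dimension equals it):

```python
from load_data import DataGenerator  # the module shown above

gen = DataGenerator(num_classes=5, num_samples_per_class=2)  # 5-way, 1 support + 1 query shot
images, labels = gen.sample_batch("train", batch_size=4)

print(images.shape)  # torch.Size([4, 2, 5, 784])
print(labels.shape)  # torch.Size([4, 2, 5, 5])
# images[:, -1] holds the query shot; each label is a row of an N x N identity matrix.
```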
+{"seq_id":"28433211773","text":"from PIL import Image\nimport glob, sys\n\nargs = sys.argv\ncmds = ['do','show','doandshow']\nhelp_list = cmds.copy()\nhelp_list.append('are the available commands for this application.')\nhelp_msg = ' '.join(help_list)\n\nif len(args) != 2 or args[1] not in cmds:\n print(help_msg)\nelse:\n cmd = args[1]\n\n if cmd == 'do' or cmd == 'doandshow':\n for infile in glob.glob(\"*.jpg\"):\n im = Image.open(infile)\n im.rotate(-90, expand=1).save(infile)\n if cmd == 'show' or cmd == 'doandshow':\n for infile in glob.glob(\"*.jpg\"):\n im = Image.open(infile)\n im.show()","repo_name":"PaluMacil/rotate","sub_path":"rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"35752798696","text":"from threading import Thread\nfrom queue import Queue\nimport socket\nimport cv2\nimport numpy\nimport time\nimport sys\nfrom config import Config\n\nclass FileVideoStream:\n\t\n\tdef __init__(self, transform=None, queue_size=128):\n\t\t# initialize the file video stream along with the boolean\n\t\t# used to indicate if the thread should be stopped or not\n\t\tself.stream = cv2.VideoCapture(0)\n\t\tself.stream.set(cv2.CAP_PROP_MODE, cv2.CAP_MODE_YUYV)\n\t\tself.stopped = False\n\t\tself.transform = transform\n\n\t\t# initialize the queue used to store frames read from\n\t\t# the video file\n\t\tself.Q = Queue(maxsize=queue_size)\n\t\t# intialize thread\n\t\tself.thread = Thread(target=self.update, args=())\n\t\tself.thread.daemon = True\n\n\t\tself.address = None\n\t\tself.init_connection()\n\n\t\t#压缩参数,后面cv2.imencode将会用到,对于jpeg来说,15代表图像质量,越高代表图像质量越好为 0-100,默认95\n\t\tencode_param=[int(cv2.IMWRITE_JPEG_QUALITY),15]\n\n\tdef init_connection():\n\t\tcon = Config()\n\t\thost = con.get(\"server\", \"host\")\n\t\tport = con.get(\"server\", \"port\")\n\t\tself.address = (host, int(port))\n\t\ttry:\n\t\t\tsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\t\texcept socket.error as msg:\n\t\t\tprint(msg)\n\t\t\tsys.exit(1)\n\n\tdef start(self):\n\t\t# start a thread to read frames from the file video stream\n\t\tself.thread.start()\n\t\treturn self\n\n\tdef update(self):\n\t\t# keep looping infinitely\n\t\twhile True:\n\t\t\t# if the thread indicator variable is set, stop the\n\t\t\t# thread\n\t\t\tif self.stopped:\n\t\t\t\tbreak\n\n\t\t\t# otherwise, ensure the queue has room in it\n\t\t\tif not self.Q.full():\n\t\t\t\t# read the next frame from the stream\n\t\t\t\t(grabbed, frame) = self.stream.read()\n\n\t\t\t\t# if the `grabbed` boolean is `False`, then we have reached the end of the video file\n\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\tself.stopped = True\n\t\t\t\t\n\t\t\t\t# if there are transforms to be done, might as well\n\t\t\t\t# do them on producer thread before handing back to\n\t\t\t\t# consumer thread. ie. Usually the producer is so far\n\t\t\t\t# ahead of consumer that we have time to spare.\n\t\t\t\t#\n\t\t\t\t# Python is not parallel but the transform operations\n\t\t\t\t# are usually OpenCV native so release the GIL.\n\t\t\t\t#\n\t\t\t\t# Really just trying to avoid spinning up additional\n\t\t\t\t# native threads and overheads of additional\n\t\t\t\t# producer/consumer queues since this one was generally\n\t\t\t\t# idle grabbing frames.\n\t\t\t\tif self.transform:\n\t\t\t\t\tframe = self.transform(frame)\n\n\t\t\t\t# add the frame to the queue\n\t\t\t\tself.Q.put(frame)\n\t\t\telse:\n\t\t\t\ttime.sleep(0.1) # Rest for 10ms, we have a full queue\n\n\t\tself.stream.release()\n\n\tdef read(self):\n\t\t# return next frame in the queue\n\t\treturn self.Q.get()\n\n\t# Insufficient to have consumer use while(more()) which does\n\t# not take into account if the producer has reached end of\n\t# file stream.\n\tdef running(self):\n\t\treturn self.more() or not self.stopped\n\n\tdef more(self):\n\t\t# return True if there are still frames in the queue. 
If stream is not stopped, try to wait a moment\n\t\ttries = 0\n\t\twhile self.Q.qsize() == 0 and not self.stopped and tries < 5:\n\t\t\ttime.sleep(0.1)\n\t\t\ttries += 1\n\n\t\treturn self.Q.qsize() > 0\n\n\tdef stop(self):\n\t\t# indicate that the thread should be stopped\n\t\tself.stopped = True\n\t\t# wait until stream resources are released (producer thread might be still grabbing frame)\n\t\tself.thread.join()\n\ndef SendVideo():\n\tcon = Config()\n\thost = con.get(\"server\", \"host\")\n\tport = con.get(\"server\", \"port\")\n\t\n\taddress = (host, int(port))\n\t\n\t# address = ('10.18.96.207', 8002)\n\ttry:\n\t\tsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\texcept socket.error as msg:\n\t\tprint(msg)\n\t\tsys.exit(1)\n\t\t\n\t\n\tcapture = cv2.VideoCapture(0)\n\tcapture.set(cv2.CAP_PROP_MODE, cv2.CAP_MODE_YUYV)\n\t# Read one frame: on success ret=1 and frame holds the image; on failure ret=0\n\tret, frame = capture.read()\n\t\n\t\n\twhile ret:\n\t\t# Pause briefly so we don't send faster than the server can process; increase this if the server does heavy work\n\t\ttime.sleep(0.01)\n\t\tret, frame = capture.read()\n\t\tif not ret:\n\t\t\tbreak\n\t\t# Serialize the raw frame for the chunk loop below;\n\t\t# 20 * 46080 = 921600 bytes, i.e. one 640x480 BGR frame\n\t\ts = frame.tobytes()\n\t\t\n\t\tfor i in range(20):\n\t\t\ttime.sleep(0.001)\n\t\t\tsock.sendto(s[i*46080:(i+1)*46080]+str.encode(str(i).zfill(2)), address)\n\n\t\t# result, imgencode = cv2.imencode('.jpg', frame, encode_param)\n\t\t# data = numpy.array(imgencode)\n\t\t# stringData = data.tostring()\n\t\t\n\t\t# save data\n\t\t# cv2.imwrite('read video data.jpg', frame, encode_param)\n\t\t# show locally\n\t\t# cv2.imshow('read video data.jpg', frame)\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\t\t\n\t\t# Read the server's reply\n\t\t# receive = sock.recvfrom(1024)\n\t\t# if len(receive): print(str(receive,encoding='utf-8'))\n\t\t# if cv2.waitKey(10) == 27: break\n\t\t\t\n\tcapture.release()\n\tcv2.destroyAllWindows()\n\tsock.close()\n\n\t\nif __name__ == '__main__':\n\tSendVideo()\n","repo_name":"820fans/UDP-Video-Transfer","sub_path":"_backup/0123/client_bak.py","file_name":"client_bak.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"28"}
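The sender above splits each frame into 20 chunks of 46080 bytes (20 × 46080 = 921600 = 640 × 480 × 3, i.e. one raw BGR frame) and appends a two-character chunk index. The receiver is not part of this record, so the following reassembly sketch is inferred from the sender loop; the host, port, and frame size here are assumptions:

```python
import socket

import cv2
import numpy as np

CHUNK = 46080          # payload bytes per datagram, matching s[i*46080:(i+1)*46080]
CHUNKS = 20            # 20 * 46080 bytes = one 640x480x3 raw BGR frame

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", 8002))  # port taken from the commented example address; adjust as needed

parts = [b""] * CHUNKS
while True:
    packet, _ = sock.recvfrom(CHUNK + 2)
    idx = int(packet[-2:])     # the sender appends str(i).zfill(2)
    parts[idx] = packet[:-2]
    if idx == CHUNKS - 1 and all(parts):
        frame = np.frombuffer(b"".join(parts), dtype=np.uint8).reshape(480, 640, 3)
        cv2.imshow("stream", frame)
        parts = [b""] * CHUNKS
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
```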
+{"seq_id":"38639190214","text":"import subprocess\nimport timeit\nfrom TaskResult import TaskResult\nimport TaskResults\nimport uuid\n\n#Class for tasks sent to the command line.\n#Strings passed into the command field of the constructor should be\n#valid command line code.\nclass Task:\n\n #Has fields: description (string) of the task. command (string) that will\n # be run when execute() is called. GUID (uuid), a unique identifier\n def __init__(self, description, command):\n self.description = description\n self.command = command\n self.GUID = uuid.uuid4()\n\n #Function to execute the command\n def execute(self):\n #The branch where command works.\n try:\n start = timeit.default_timer()\n output = subprocess.check_output(self.command)\n end = timeit.default_timer()\n #(Guid, True for execution worked, 0 for success return_code, the output,\n # no error string, and end - start gives execution duration\n task_result = TaskResult(self.GUID, True, 0, output, None, (end - start))\n TaskResults.new_task(task_result)\n\n #Branch for a command that throws an error that can be processed.\n except subprocess.CalledProcessError as e:\n end = timeit.default_timer()\n #Guid, False for a failed execution, the failed return code, the\n #normal output, the exception string, and the time it took before failure\n task_result = TaskResult(self.GUID, False, e.returncode, e.output,\n str(e), (end - start))\n TaskResults.new_task(task_result)\n\n #Branch for commands that throw errors that cannot be processed\n except Exception as e:\n end = timeit.default_timer()\n #Guid, False for failure, no return code can be salvaged, no output\n #exists, string for the error message, and time before failure.\n task_result = TaskResult(self.GUID, False, None, None,\n str(e), (end - start))\n print(\"\\nError: Task threw a non-typical error, make sure command is \"\n + \"valid.\\n Command: \" + self.command)\n TaskResults.new_task(task_result)\n \n","repo_name":"AlexNVanPatten/TaskQueue","sub_path":"Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"70289598155","text":"from config.common import *\nimport os\n\nDEBUG = True\nSQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n os.path.join(BASE_DIR, 'flastagram.db'))\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nPROPGATE_EXECPTIONS = True\nJWT_SECRET_KEY = os.environ[\"JWT_SECRET_KEY\"]\nSECRET_KEY = os.environ[\"APP_SECRET_KEY\"]\nJWT_BLACKLIST_ENABLED = True\nJWT_BLACKLIST_TOKEN_CHECKS = [\"access\", \"refresh\"]\n","repo_name":"D-Sup/Flask-instagram","sub_path":"backend/config/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"73242490314","text":"import os\nimport sys\nimport numpy as np\nimport tensorflow\nfrom keras.models import Model, load_model\nfrom keras.layers import *\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.convolutional import Conv2D\nfrom hw3_fc import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n\ndef ensemble(inputs, models):\n\toutputs = [model.outputs[0] for model in models]\n\taverage = Average()(outputs)\n\tmodel = Model(inputs=inputs, outputs=average)\n\treturn model\n\n#These functions of the models are defined in hw3_fc.py\n#It is in the same folder\n\ninputs = Input(shape=(48, 48, 1))\nmodel1 = gen08(inputs)\nmodel2 = gen09(inputs)\nmodel3 = gen12(inputs)\nmodel4 = func01(inputs)\nmodel5 = func02(inputs)\nmodel6 = Res02(inputs)\nmodel7 = Res03(inputs)\n\nprint(\"=========================Loading weight========================\")\n\nmodel1.load_weights('weights/gen08.hd5f')\nmodel2.load_weights('weights/gen09.hd5f')\nmodel3.load_weights('weights/gen12.hd5f')\nmodel4.load_weights('weights/func01.hd5f')\nmodel5.load_weights('weights/func02.hd5f')\nmodel6.load_weights('weights/ResNet02.hd5f')\nmodel7.load_weights('weights/ResNet03.hd5f')\n\n\nprint(\"=============================Merging===========================\")\n\nmodels = [model1, model2, model3, model4, model5, model6, model7]\nmodel = ensemble(inputs, models)\nmodel.save('ensemble.h5')\nprint(\"============================Done================================\")","repo_name":"yoshonabee/ML2018SPRING","sub_path":"hw4/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"11330119308","text":"def uniq(iterable):\n values = set()\n for i in iterable:\n v = tuple(sorted(i))\n if v not in values:\n yield i\n values.add(v)\n\n\ndef memoize(func):\n cache = {}\n\n def inner(n):\n if n not in cache:\n cache[n] = func(n)\n return cache[n]\n\n return inner\n\n\n@memoize\ndef combos(n):\n if n == 1:\n return [[1]]\n\n result = [[n]]\n for i in range(1, n):\n result.extend([i] + l for l in combos(n - i))\n\n return list(uniq(result))\n","repo_name":"mzoz/coding-challenges","sub_path":"codewars/solutions/python/4 kyu/Find all possible number combos that sum to a number/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"73389941514","text":"from django.db import models\n\n# Create your models here.\nfrom django.db import models\nfrom django.utils import timezone\nfrom datetime import timedelta\n\nGender = [\n ('male','male'),\n ('female','female')\n]\n\nclass Event(models.Model):\n DAYS_OF_WEEK = [\n ('Monday', 'Monday'),\n ('Tuesday', 'Tuesday'),\n ('Wednesday', 'Wednesday'),\n ('Thursday', 'Thursday'),\n ('Friday', 'Friday'),\n ('Saturday', 'Saturday'),\n ('Sunday', 'Sunday')\n ]\n title = models.CharField(max_length=200)\n image = models.ImageField(null=True)\n price = models.CharField(max_length = 255,null=True)\n description = models.CharField(max_length = 255,null=True)\n location = models.CharField(max_length = 255, null =True)\n created_at = models.DateTimeField(auto_now_add=True, null=True)\n start_date = models.DateField()\n end_date = models.DateField()\n start_time = models.TimeField()\n end_time = models.TimeField()\n is_private = models.BooleanField(default=False)\n private_password = models.CharField(max_length=255, null=True, blank=True)\n is_recurring = models.BooleanField(default=False)\n recurrence_day = models.CharField(max_length=10, null=True, blank=True, choices=DAYS_OF_WEEK)\n\n def __str__(self):\n return self.title\n\n def is_today(self):\n return self.start_date <= timezone.now().date() <= self.end_date\n\n def is_recurring_today(self):\n if not self.is_recurring:\n return False\n return timezone.now().strftime('%A') == self.recurrence_day\n\n def is_expired(self):\n end_datetime = timezone.make_aware(\n timezone.datetime.combine(self.end_date, self.end_time)\n )\n return timezone.now() > end_datetime\n \n def is_expired(self):\n end_datetime = timezone.make_aware(\n timezone.datetime.combine(self.end_date, self.end_time)\n )\n return timezone.now() > end_datetime\n\n\nclass MakeReservation(models.Model):\n event = models.ForeignKey(Event, verbose_name='Event', on_delete=models.CASCADE)\n Name = models.CharField(verbose_name='First Name', max_length=255)\n Email_address = models.EmailField()\n Confirm_address = models.EmailField(null=True)\n Number_of_guest = models.IntegerField(null=True)\n Gender = models.CharField(max_length=255, choices=Gender)\n \n def __str__(self):\n return self.Name\n ","repo_name":"DavidOmizz/Popat","sub_path":"my_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"6533287102","text":"\"\"\"\n Description: This file has some functions which\n are used to execute/insert some data from/to Database.\n\n Database consists of some tables:\n log -- id, time, info\n info_global -- week, time_lesson1, time_lesson2, time_lesson3, time_lesson4, time_lesson5,\n time_lesson6, day2, day3, day4, day5, day6, day1,\n day2, day3, day4, day5, day6, day7 ,day8, day9, day10, day11, day12, day13, day14\n info_professor -- group_name, subject, name, type, link\n info_users -- user_id, user_name, user_surname, user_nickname\n list_groups -- group_name\n schedule -- group_name, day1, day2, day3, day4, day5, day6 ,day8, day9, day10, day11, day12, day13\n users -- user_id, group_name, schedule_switch, role, is_blocked\n game -- user_id, user_name_game, total_score, total_games\n\n Author: Mikhail Shikalovskyi\n Version: 1.0 (release)\n\"\"\"\nimport Database.reformattion_data as reformation_data\nimport Database.SQL as SQL\nimport Database.db_function as db_function\nimport parsing\nfrom loger_config import logger\n\n\ndef check_group(group: str) -> bool:\n \"\"\"\n Function which checks if group exists in database\n\n :param group: group name which is used in filter\n \"\"\"\n filter = f\"SELECT * FROM list_groups WHERE group_name = '{group}'\"\n result = reformation_data.reformat_str(SQL.execute(filter))\n return bool(result)\n\n\ndef check_user(user_id: int) -> bool:\n \"\"\"\n Function which checks if user exists in database\n\n :param user_id: user id which is used in filter\n \"\"\"\n filter = f\"SELECT * FROM users WHERE user_id = '{user_id}'\"\n result = reformation_data.reformat_int(SQL.execute(filter))\n return bool(result)\n\n\ndef check_user_role(user_id: int) -> str:\n \"\"\"\n Function which checks role of user in database\n\n :param user_id: user id which is used in filter\n \"\"\"\n filter = f\"SELECT role FROM users WHERE user_id = '{user_id}'\"\n result = reformation_data.reformat_str(SQL.execute(filter))\n return result\n\n\ndef transfer_role(user_id: int, new_user_id: int):\n \"\"\"\n Function which transfers role from 'moderator' to other user\n\n :param user_id: user id which is used in filter\n :param new_user_id: new user id which is used in second filter\n \"\"\"\n filter = f\"UPDATE users SET role = 'user' WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n filter = f\"UPDATE users SET role = 'moderator' WHERE user_id = '{new_user_id}'\"\n SQL.table_operate(filter)\n db_function.add_log(f\"Transfer successful from {user_id} to {new_user_id}\")\n\n\ndef get_userid_by_nickname(user_nickname: str) -> int:\n \"\"\"\n Function which checks role of user in database\n\n :param user_nickname: user nickname which is used in filter\n \"\"\"\n filter = f\"SELECT user_id FROM info_users WHERE user_nickname = '{user_nickname}'\"\n result = reformation_data.reformat_int(SQL.execute(filter))\n return result\n\n\ndef users_nickname_by_group(user_group: str) -> list:\n \"\"\"\n Function which checks role of user in database\n\n :param user_group: user group name which is used in filter\n \"\"\"\n filter = f\"SELECT user_nickname FROM info_users JOIN users ON info_users.user_id = users.user_id WHERE users.group_name = '{user_group}' AND users.role = 'user'\"\n result = reformation_data.reformat_list(SQL.execute(filter))\n return result\n\n\ndef count_moderators(group_name: str) -> int:\n \"\"\"\n Function which checks quantity of moderators in group and returns name on role chosen\n\n :param group_name: user group name which is used in filter\n 
\"\"\"\n filter = f\"SELECT user_id FROM users WHERE role = 'moderator' AND group_name = '{group_name}'\"\n result = reformation_data.reformat_list(SQL.execute(filter))\n return len(result)\n\n\ndef choose_role(group_name: str) -> str:\n \"\"\"\n Function which checks quantity of moderators in group and returns name on role chosen\n\n :param group_name: user group name which is used in filter\n \"\"\"\n filter = f\"SELECT user_id FROM users WHERE group_name = '{group_name}' AND role = 'moderator'\"\n result = reformation_data.reformat_list(SQL.execute(filter))\n if len(result) < 3:\n return \"moderator\"\n else:\n return \"user\"\n\n\ndef check_user_group(user_id: int) -> str:\n \"\"\"\n Function which returns name of group by user\n\n :param user_id: user id which is used in filter\n \"\"\"\n filter = f\"SELECT group_name FROM users WHERE user_id = '{user_id}'\"\n result = reformation_data.reformat_str(SQL.execute(filter))\n return result\n\n\ndef add_user(user_name: str, user_surname: str, user_nickname: str, user_id: int, user_group: str,\n user_schedule: int, user_role: str):\n \"\"\"\n Function which adds new user and new group in list of groups or updates data which already exists\n\n :param user_name: user`s name which is inserting into database\n :param user_surname: user`s surname which is inserting into database\n :param user_nickname: user`s nickname which is inserting into database\n :param user_id: user`s id which is inserting into database\n :param user_group: user`s group which is inserting into database\n :param user_schedule: user`s schedule which is inserting into database\n :param user_role: user`s role which is inserting into database\n \"\"\"\n if check_user(user_id) is False:\n filter = f\"INSERT INTO users ('user_id', 'group_name', 'schedule_switch', 'role') VALUES ('{user_id}', '{user_group}', '{user_schedule}','{user_role}')\"\n SQL.table_operate(filter)\n filter = f\"INSERT INTO info_users ('user_id', 'user_name', 'user_surname', 'user_nickname') VALUES ('{user_id}', '{user_name}', '{user_surname}','{user_nickname}')\"\n SQL.table_operate(filter)\n if check_group(user_group) is False:\n add_new_group(user_group)\n else:\n filter = f\"UPDATE users SET group_name = '{user_group}', schedule_switch = '{user_schedule}', role = '{user_role}' WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n filter = f\"UPDATE info_users SET user_name = '{user_name}', user_surname = '{user_surname}', user_nickname = '{user_nickname}' WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n logger.info(f\"New user: {user_name}, {user_nickname} added.\")\n db_function.add_log(f\"New user: {user_name}, {user_nickname} added.\")\n\n\ndef update_schedule_switch(user_id: int, schedule_switch: int):\n \"\"\"\n Function to change schedule_switch by user_id 0 -> no schedule 1 -> only morning 2 -> morning and evening\n\n :param user_id: user id which is used in filter\n :param schedule_switch: new schedule switch to update\n \"\"\"\n if check_user(user_id) is True:\n filter = f\"UPDATE users SET schedule_switch = '{schedule_switch}' WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n\n\ndef change_group(user_id: int, group_name: str, role: str):\n \"\"\"\n Function to change user group\n\n :param user_id: user id which is used in filter\n :param group_name: new user group_name to update\n :param role: new role of user to update\n \"\"\"\n if check_user(user_id) is True and check_user_group(user_id) != group_name:\n filter = f\"UPDATE users SET group_name = '{group_name}', role 
= '{role}' WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n\n\ndef list_lessons(group: str) -> list:\n \"\"\"\n Function to execute list of lessons\n\n :param group: user group name which is used in filter\n \"\"\"\n filter = f\"SELECT subject FROM info_professor WHERE group_name = '{group}'\"\n return reformation_data.reformat_list(SQL.execute(filter))\n\n\ndef add_new_group(group: str):\n \"\"\"\n Function to add new group in list of groups\n\n :param group: new group name to insert into database\n \"\"\"\n filter = f\"INSERT INTO list_groups ('group_name') VALUES ('{group}')\"\n SQL.table_operate(filter)\n parser = parsing.Parser()\n data = parser.parse(group=group)\n for week in data:\n db_function.inserter_schedule(week, group, data)\n db_function.inserter_professor(week, group, data)\n db_function.add_log(f\"New group {group} added, schedule parsed.\")\n\n\ndef delete_group(group: str):\n \"\"\"\n Function to delete groups which not existed in list of groups\n\n :param group: group name to delete from database\n \"\"\"\n filter = f\"DELETE FROM list_groups WHERE group_name = '{group}'\"\n SQL.table_operate(filter)\n db_function.add_log(f\"Group {group} deleted.\")\n\n\ndef change_is_blocked(user_id: int):\n \"\"\"\n Function to change user attribute 'is_blocked' to identify inactive users\n\n :param user_id: user id which is used in filter\n \"\"\"\n filter = f\"UPDATE users SET is_blocked = 1 WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n\n\ndef deleting_blocked_user():\n \"\"\" Function to delete inactive users \"\"\"\n filter = \"SELECT user_id FROM users WHERE is_blocked = 1\"\n list_user_id = reformation_data.reformat_list(SQL.execute(filter))\n for user_id in list_user_id:\n filter = f\"DELETE FROM users WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n filter = f\"DELETE FROM info_users WHERE user_id = '{user_id}'\"\n SQL.table_operate(filter)\n db_function.add_log(f\"User: {user_id} has been deleted\")\n logger.info(f\"Blocked users has been deleted.\")\n","repo_name":"Napchik/TeleBot_by_noPIP","sub_path":"Database/db_function_user.py","file_name":"db_function_user.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
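Every query in the module above splices values into SQL with f-strings, which is injection-prone for user-supplied data such as nicknames. The custom `SQL.execute` wrapper isn't shown, so as a hedged illustration, here is the same `check_user` lookup written against sqlite3 directly with placeholders:

```python
import sqlite3

def check_user_safe(conn: sqlite3.Connection, user_id: int) -> bool:
    # The ? placeholder lets the driver escape the value instead of
    # splicing it into the statement text.
    cur = conn.execute("SELECT 1 FROM users WHERE user_id = ?", (user_id,))
    return cur.fetchone() is not None
```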
+{"seq_id":"3987258422","text":"import numpy as np\n\nimport torch\nimport pickle\nimport time\nimport os\n\nfrom TFPRec import TFPRec\n\n\ndef train_TFPRec(train_set, test_set, h_params, vocab_size, device, city_code):\n\n model_path = f\"./results/{city_code}_model\"\n log_path = f\"./results/{city_code}_log\"\n meta_path = f\"./results/{city_code}_meta\"\n\n print(\"parameters:\", h_params)\n file = open(log_path, 'wb')\n pickle.dump(h_params, file)\n file.close()\n\n # construct model\n rec_model = TFPRec(\n vocab_size = vocab_size,\n f_embed_size = h_params['embed_size'],\n num_encoder_layers = h_params['tfp_layer_num'],\n num_lstm_layers = h_params['lstm_layer_num'],\n num_heads = h_params['head_num'],\n forward_expansion = h_params['expansion'],\n dropout_p = h_params['dropout'],\n back_step = h_params['future_step'],\n aux_train = h_params['aux'],\n mask_prop = h_params['mask_prop']\n )\n\n rec_model = rec_model.to(device)\n\n # continue with previous training\n start_epoch = 0\n if os.path.isfile(model_path):\n rec_model.load_state_dict(torch.load(model_path))\n rec_model.train()\n\n # load training epoch\n meta_file = open(meta_path, \"rb\")\n start_epoch=pickle.load(meta_file) + 1\n meta_file.close()\n\n params = list(rec_model.parameters())\n optimizer = torch.optim.Adam(params, lr=h_params['lr'])\n\n loss_dict, recalls, ndcgs, maps = {}, {}, {}, {}\n\n for i in range(start_epoch, h_params['epoch']):\n begin_time = time.time()\n total_loss = 0.\n for sample in train_set:\n \n sample_to_device = []\n for seq in sample:\n features = torch.tensor(seq[:5]).to(device)\n dist_matrix = torch.tensor(seq[5]).to(device)\n \n sample_to_device.append((features, dist_matrix))\n \n loss, _ = rec_model(sample_to_device)\n total_loss += loss.detach().cpu()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # test\n # if i%10==0:\n recall, ndcg, map = test_TFPRec(test_set, rec_model)\n recalls[i] = recall\n ndcgs[i] = ndcg\n maps[i] = map\n \n # record avg loss\n avg_loss = total_loss / len(train_set)\n loss_dict[i] = avg_loss\n print(f\"epoch: {i}; average loss: {avg_loss}, time taken: {int(time.time()-begin_time)}s\")\n # save model\n torch.save(rec_model.state_dict(), model_path)\n # save last epoch\n meta_file = open(meta_path, 'wb')\n pickle.dump(i, meta_file)\n meta_file.close()\n \n \n # early stop\n past_10_loss = list(loss_dict.values())[-11:-1]\n if len(past_10_loss)>10 and abs(total_loss - np.mean(past_10_loss)) < h_params['loss_delta']:\n print(f\"***Early stop at epoch {i}***\")\n break\n\n file = open(log_path, 'wb')\n pickle.dump(loss_dict, file)\n pickle.dump(recalls, file)\n pickle.dump(ndcgs, file)\n pickle.dump(maps, file)\n file.close()\n\n print(\"============================\")\n\n\ndef test_TFPRec(test_set, rec_model, ks=[1,5,10]):\n\n def calc_recall(labels, preds, k):\n return torch.sum(torch.sum(labels==preds[:,:k], dim=1))/labels.shape[0]\n \n def calc_ndcg(labels, preds, k):\n exist_pos = (preds[:,:k] == labels).nonzero()[:,1] + 1\n ndcg = 1/torch.log2(exist_pos+1)\n return torch.sum(ndcg) / labels.shape[0]\n\n def calc_map(labels, preds, k):\n exist_pos = (preds[:,:k] == labels).nonzero()[:,1] + 1\n map = 1/exist_pos\n return torch.sum(map) / labels.shape[0]\n\n preds, labels = [], []\n for sample in test_set:\n sample_to_device = []\n for seq in sample:\n features = torch.tensor(seq[:5]).to(device)\n dist_matrix = torch.tensor(seq[5]).to(device)\n \n sample_to_device.append((features, dist_matrix))\n \n pred, label = 
rec_model.predict(sample_to_device)\n preds.append(pred.detach())\n labels.append(label.detach())\n preds = torch.stack(preds, dim=0)\n labels = torch.unsqueeze(torch.stack(labels, dim=0), 1)\n\n recalls, NDCGs, MAPs = {}, {}, {}\n for k in ks:\n recalls[k] = calc_recall(labels, preds, k)\n NDCGs[k] = calc_ndcg(labels, preds, k)\n MAPs[k] = calc_map(labels, preds, k)\n print(f\"Recall @{k} : {recalls[k]},\\tNDCG@{k} : {NDCGs[k]},\\tMAP@{k} : {MAPs[k]}\")\n \n return recalls, NDCGs, MAPs\n\n\nif __name__ == '__main__':\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n city_index = input(\"Please select the city: \\n\\n1. New York, \\n2. Singapore, \\n3. Pheonix\\n\")\n if city_index == '1':\n city = 'NY'\n elif city_index == '2':\n city = 'SIN'\n elif city_index == '3':\n city = 'PHO'\n else:\n raise Exception(\"Invalid City Code Selected\")\n\n # get parameters\n h_params = {}\n h_params['expansion'] = 4\n h_params['future_step'] = 1\n h_params['aux'] = True\n h_params['mask_prop'] = 0.1\n h_params['lr'] = 1e-4\n h_params['epoch'] = 100\n h_params['loss_delta'] = 1e-3\n\n\n # read training data\n file = open(f\"./processed_data/{city}_train\", 'rb')\n train_set=pickle.load(file)\n file = open(f\"./processed_data/{city}_valid\", 'rb')\n valid_set=pickle.load(file)\n\n # read meta data\n file = open(f\"./processed_data/{city}_meta\", 'rb')\n meta = pickle.load(file)\n file.close()\n\n vocab_size = {}\n vocab_size[\"POI\"] = torch.tensor(len(meta[\"POI\"])).to(device)\n vocab_size[\"cat\"] = torch.tensor(len(meta[\"cat\"])).to(device)\n vocab_size[\"user\"] = torch.tensor(len(meta[\"user\"])).to(device)\n vocab_size[\"hour\"] = torch.tensor(len(meta[\"hour\"])).to(device)\n vocab_size[\"day\"] = torch.tensor(len(meta[\"day\"])).to(device)\n \n # adjust specific parameters for each city\n if city == 'SIN':\n # SIN param\n h_params['embed_size'] = 20\n h_params['tfp_layer_num'] = 1\n h_params['lstm_layer_num'] = 3\n h_params['dropout'] = 0.2\n h_params['head_num'] = 1\n\n elif city == 'PHO':\n # PHO param\n h_params['embed_size'] = 20\n h_params['tfp_layer_num'] = 4\n h_params['lstm_layer_num'] = 2\n h_params['dropout'] = 0.2\n h_params['head_num'] = 1\n\n elif city == 'NY':\n # NY param\n h_params['embed_size'] = 20\n h_params['tfp_layer_num'] = 1\n h_params['lstm_layer_num'] = 2\n h_params['dropout'] = 0.1\n h_params['head_num'] = 1\n\n # create output folder\n if not os.path.isdir('./results'):\n os.mkdir(\"./results\") \n\n train_TFPRec(train_set, valid_set, h_params, vocab_size, device, city_code=city)","repo_name":"wuziqi2/CFPRec","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"28"}
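A quick sanity check of the ranking metrics inside `test_TFPRec`, restated on hand-built tensors so it runs standalone (the `.float()` cast is added for older PyTorch versions where `log2` rejects integer tensors):

```python
import torch

preds = torch.tensor([[3, 1, 2], [7, 8, 9]])  # top-3 predicted items for 2 samples
labels = torch.tensor([[1], [9]])             # the true item for each sample

# Recall@2: label 1 is in sample 0's top-2, label 9 is not in sample 1's top-2
recall2 = torch.sum(torch.sum(labels == preds[:, :2], dim=1)) / labels.shape[0]
print(recall2)  # tensor(0.5000)

# NDCG@3: hits at 1-indexed positions 2 and 3 -> (1/log2(3) + 1/log2(4)) / 2
pos = (preds[:, :3] == labels).nonzero()[:, 1] + 1
print(torch.sum(1 / torch.log2(pos.float() + 1)) / labels.shape[0])  # ~0.5655
```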
+{"seq_id":"18230480244","text":"from distutils.core import setup\nimport os.path\n\nREADME = os.path.join(os.path.dirname(__file__), 'README.md')\n\nversion = '0.2.0'\n\nwith open(README) as fp:\n longdesc = fp.read()\n\nsetup(name='thinrpc',\n include_package_data=True,\n version=version,\n description='A Lightweight RPC framework for Python',\n long_description=longdesc,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Software Development',\n 'Intended Audience :: Developers'\n ],\n author='Anson Rosenthal',\n author_email='anson.rosenthal@gmail.com',\n license='MIT License',\n url='https://github.com/anrosent/thinrpc.git',\n packages=['thinrpc']\n)\n\n","repo_name":"anrosent/thinrpc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"5602299617","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nUsed for predictor training.\r\n\r\nTODO: Some further notes are needed here.\r\n\r\nThis file is created by QIAO RUKUN on April.18, 2019.\r\n\r\n\"\"\"\r\n\r\n# System operation package\r\nimport sys\r\nimport configparser\r\nimport os\r\n\r\n# PyTorch & NumPy\r\nfrom torch.utils.data import DataLoader\r\nimport torch\r\nimport numpy as np\r\nimport visdom\r\n\r\n# My package\r\nimport Module.visual_module as vm\r\nfrom Module.depth_net import DepthNet\r\nfrom Module.data_set_loader import FlowDataSet, SampleSet\r\n\r\n\r\ndef lr_change(epoch):\r\n epoch = epoch // config.getint('NetworkPara', 'lr_period')\r\n return config.getfloat('NetworkPara', 'lr_base') ** epoch\r\n\r\n\r\ndef check_nan(network, save_path=None):\r\n\r\n flag = True\r\n param_list = list(network.parameters())\r\n for idx in range(0, len(param_list)):\r\n param = param_list[idx]\r\n if torch.isnan(param).any().item():\r\n flag = False\r\n break\r\n if param.grad is not None and torch.isnan(param.grad).any().item():\r\n flag = False\r\n break\r\n try:\r\n assert flag\r\n except AssertionError as inst:\r\n if save_path:\r\n torch.save(network.state_dict(), save_path)\r\n print(inst)\r\n raise\r\n\r\n\r\ndef spatial_train():\r\n\r\n # Step 0: Set data_loader, visual\r\n # --------------------------------------------\r\n # DataLoader and parameters\r\n depth_range = [float(x) for x in config.get('DataLoader', 'depth_range').split(',')]\r\n opts_train = {'header': config.get('DataLoader', 'opt_header').split(','),\r\n 'stride': config.getint('DataLoader', 'train_stride'),\r\n 'bias': config.getint('DataLoader', 'train_bias')}\r\n train_dataset = FlowDataSet(root_dir=config.get('FilePath', 'root_path'),\r\n list_name=config.get('DataLoader', 'train_list'),\r\n opts=opts_train)\r\n train_loader = DataLoader(train_dataset, batch_size=config.getint('Paras', 'batch_size'),\r\n shuffle=True, num_workers=0)\r\n # opts_test = {'header': config.get('DataLoader', 'opt_header').split(','),\r\n # 'stride': config.getint('DataLoader', 'test_stride'),\r\n # 'bias': config.getint('DataLoader', 'test_bias')}\r\n # test_dataset = FlowDataSet(root_dir=config.get('FilePath', 'root_path'),\r\n # list_name=config.get('DataLoader', 'test_list'),\r\n # opts=opts_test)\r\n # test_loader = DataLoader(test_dataset, batch_size=config.getint('Paras', 'batch_size'),\r\n # shuffle=True, num_workers=0)\r\n\r\n # Visdom setting\r\n vis_env = config.get('Paras', 'vis_env')\r\n vis = visdom.Visdom(env=vis_env)\r\n\r\n print('Step 0: DataSet initialize finished.')\r\n # print(' DataLoader size (tr/te): (%d/%d).' % (len(train_loader), len(test_loader)))\r\n print(' DataLoader size: %d' % len(train_loader))\r\n\r\n # Step 1: Create network model, Optimizers\r\n # -----------------------------------------------\r\n\r\n # Loss function\r\n rigid_loss = torch.nn.L1Loss()\r\n\r\n # Network\r\n depth_net = DepthNet(alpha_range=depth_range)\r\n flag_set = [False]\r\n if os.path.exists(config.get('FilePath', 'depth_model') + '.pt'):\r\n flag_set[0] = True\r\n depth_net.load_state_dict(torch.load(config.get('FilePath', 'depth_model') + '.pt'), strict=False)\r\n depth_net.train()\r\n assert check_nan(depth_net)\r\n if cuda:\r\n depth_net = depth_net.cuda()\r\n rigid_loss = rigid_loss.cuda()\r\n print('Step 1: Network finished. 
Load model: ', flag_set)\r\n\r\n # Optimizers\r\n g_lr = config.getfloat('NetworkPara', 'predictor_lr')\r\n optimizer_g = torch.optim.RMSprop(params=depth_net.parameters(), lr=g_lr)\r\n schedular_g = torch.optim.lr_scheduler.LambdaLR(optimizer_g, lr_lambda=lr_change)\r\n print('Step 2: Optimizers setting finished.')\r\n\r\n # Step 2: Main loop. Including training/testing/visualization/storage\r\n # ----------------------------------------------------------------------\r\n Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\r\n report_period = config.getint('Paras', 'report_period')\r\n save_period = config.getint('Paras', 'save_period')\r\n iter_times = 0\r\n for epoch in range(config.getint('Paras', 'start_epoch'), config.getint('Paras', 'total_epoch')):\r\n # ................. #\r\n # 2.1 Train part #\r\n # ................. #\r\n g_loss_running = 0.0\r\n g_loss_epoch = 0.0\r\n schedular_g.step()\r\n for i, data in enumerate(train_loader, 0):\r\n # Get data\r\n data = SampleSet(data)\r\n if cuda:\r\n data.to_cuda()\r\n else:\r\n data.to_cpu()\r\n idx_vec = data['idx']\r\n depth_cam = data['depth_cam']\r\n mask_cam = data['mask_cam']\r\n flow1_cv = data['flow1_cv']\r\n mask_flow1 = data['mask_flow1']\r\n\r\n # Train Easy Version: Use depth map for error calculation.\r\n optimizer_g.zero_grad()\r\n depth_est = depth_net(flow1_cv)\r\n g_loss = rigid_loss(depth_est.masked_select(mask_flow1), depth_cam.masked_select(mask_flow1))\r\n g_loss.backward()\r\n optimizer_g.step()\r\n g_loss_running += g_loss.item()\r\n g_loss_epoch += g_loss.item()\r\n iter_times += 1\r\n\r\n # Report: draw depth map and loss line.\r\n now_lr = optimizer_g.param_groups[0]['lr']\r\n report_info = vm.iter_report(vis=vis, win_set=config['WinSet'], input_set=(\r\n (iter_times, g_loss_running, now_lr),))\r\n g_loss_running = 0\r\n print(report_info)\r\n\r\n # Visualization:\r\n if (i + 1) % report_period == 0:\r\n vm.show_report(vis=vis, win_set=config['WinSet'], input_set=((depth_est, depth_cam, mask_cam),))\r\n g_loss_running = 0\r\n check_nan(depth_net, save_path=config.get('FilePath', 'depth_model') + '_error.pt')\r\n\r\n # 2.2 Epoch visualization:\r\n print('Epoch[%d] finished.' % epoch)\r\n epoch_loss = g_loss_epoch / len(train_loader)\r\n vm.epoch_report(vis, config['WinSet'], input_set=((epoch, epoch_loss),))\r\n\r\n # Check Parameter nan number\r\n check_nan(depth_net, save_path=config.get('FilePath', 'depth_model') + '_error.pt')\r\n\r\n # Save\r\n if epoch % save_period == save_period - 1:\r\n torch.save(depth_net.state_dict(),\r\n ''.join([config.get('FilePath', 'save_model'),\r\n config.get('FilePath', 'depth_model'),\r\n str(epoch),\r\n '.pt']))\r\n print(' Save model at epoch %d.' % epoch)\r\n torch.save(depth_net.state_dict(), config.get('FilePath', 'depth_model') + '.pt')\r\n\r\n # ------------\r\n # Test part:\r\n # ------------\r\n # with torch.no_grad():\r\n # g_loss_test = 0\r\n # for i, data in enumerate(test_loader, 0):\r\n # # 1. 
Get data\r\n # data = SampleSet(data)\r\n # if cuda:\r\n # data.to_cuda()\r\n # else:\r\n # data.to_cpu()\r\n # mask_cam = data['mask_cam']\r\n # disp_cam = data['disp_cam']\r\n # cor_xc = data['cor_xc']\r\n # cor_xc_t = data['cor_xc_t']\r\n # cor_yc = data['cor_yc']\r\n # cor_yc_t = data['cor_yc_t']\r\n # mask_pro = data['mask_pro']\r\n # flow_mat, mask_flow = flow_estimator.get_flow_value(disp_cam, mask_cam, cor_xc_t, cor_yc_t, mask_pro,\r\n # cor_xc, cor_yc)\r\n # op_center = flow_estimator.get_op_center(flow_mat, mask_flow)\r\n # flow_estimator.set_cam_info(op_center)\r\n #\r\n # # Test Easy Version:\r\n # alpha_mat = depth_net(flow_mat)\r\n # disp_fake, disp_fake_t, mask_flow_t = flow_estimator.alpha2disps(alpha_mat, flow_mat, mask_flow)\r\n # g_loss = rigid_loss(disp_fake.masked_select(mask_flow.byte()), disp_cam.masked_select(mask_flow.byte()))\r\n # g_loss_test += g_loss.item()\r\n # print('.', end='', flush=True)\r\n # # Save\r\n # np.save('show_output/disp_real_test%d.npy' % (i + 1), disp_cam.cpu().numpy())\r\n # np.save('show_output/disp_fake_test%d.npy' % (i + 1), disp_fake.cpu().numpy())\r\n # np.save('show_output/mask_flow_test%d.npy' % (i + 1), mask_flow.cpu().numpy())\r\n #\r\n # report_info = vm.iter_visual_test(vis=vis, win_set=config['WinSet'], input_set=(\r\n # (epoch, len(test_loader)),\r\n # (g_loss_test, g_loss_test)))\r\n # print(report_info)\r\n\r\n print('Step 3: Finish training.')\r\n\r\n\r\nif __name__ == '__main__':\r\n assert len(sys.argv) >= 2\r\n config_path = sys.argv[1]\r\n\r\n config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())\r\n config.read(config_path)\r\n\r\n cuda = True if torch.cuda.is_available() else False\r\n\r\n spatial_train()\r\n","repo_name":"CodePointer/SLDLRecon","sub_path":"train_0418.py","file_name":"train_0418.py","file_ext":"py","file_size_in_byte":9364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
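The lr_change/LambdaLR pairing above multiplies the base learning rate by lr_base ** (epoch // lr_period). A minimal standalone sketch of the same mechanics, with 0.5 and 10 standing in for the config's lr_base and lr_period (illustrative constants, not from the file):

import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.RMSprop(params, lr=1e-3)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda e: 0.5 ** (e // 10))

for epoch in range(25):
    opt.step()
    sched.step()  # recent PyTorch expects scheduler.step() after optimizer.step()
    # opt.param_groups[0]['lr'] is 1e-3 for epochs 0-9, 5e-4 for 10-19, then 2.5e-4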
+{"seq_id":"71794325834","text":"import os\nimport adsk.core, adsk.fusion, traceback\nimport sys\nimport csv\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom fusion_mujoco_py.cohort import make_combinations\nfrom fusion_mujoco_py.export import write_stls_and_mojoco_xml\n\ndef capture_screenshot(output_folder, instance_name):\n app = adsk.core.Application.get()\n app.activeViewport.fit()\n app.activeViewport.saveAsImageFile(os.path.join(output_folder, '{}.jpg'.format(instance_name)), 640, 640);\n\ndef set_render_workspace():\n app = adsk.core.Application.get()\n ui = app.userInterface\n for workspace in ui.workspaces:\n if workspace.name == 'Render':\n workspace.activate()\n break\n\ndef set_camera():\n app = adsk.core.Application.get()\n cam = app.activeViewport.camera\n cam.eye = adsk.core.Point3D.create(100, -100, 20)\n cam.target = adsk.core.Point3D.create(0, 0, 0)\n cam.upVector = adsk.core.Vector3D.create(0, 0, 1)\n cam.cameraType = adsk.core.CameraTypes.PerspectiveCameraType\n app.activeViewport.camera = cam\n app.activeViewport.fit()\n\ndef get_input_csv_filename(ui):\n csvInputDialog = ui.createFileDialog()\n csvInputDialog.isMultiSelectEnabled = False\n csvInputDialog.title = 'Input parameter ranges CSV file'\n csvInputDialog.filter = '*.csv'\n dialogResult = csvInputDialog.showOpen()\n if dialogResult == adsk.core.DialogResults.DialogOK:\n return csvInputDialog.filename\n else:\n return None\n\ndef parse_input_csv(filename):\n \"\"\"\n CSV file has 4 lines\n 1. User variable name\n 2. Min value\n 3. Max value\n 4. Step value\n \"\"\"\n file = open(filename)\n lines = file.readlines()\n assert(len(lines) == 4)\n vars = list(map(lambda x: x.strip(), lines[0].split(',')))\n mins = list(map(lambda x: x.strip(), lines[1].split(',')))\n maxs = list(map(lambda x: x.strip(), lines[2].split(',')))\n steps = list(map(lambda x: x.strip(), lines[3].split(',')))\n return (vars, mins, maxs, steps)\n\ndef generate_all_values(vars, mins, maxs, steps):\n all_values = []\n for index in range(len(vars)):\n min = float(mins[index])\n max = float(maxs[index])\n step = float(steps[index])\n values = []\n value = min\n while value <= max:\n values.append(value)\n value += step\n all_values.append((vars[index], values))\n return all_values\n\ndef run(context):\n ui = None\n try:\n app = adsk.core.Application.get()\n ui = app.userInterface\n product = app.activeProduct\n design = adsk.fusion.Design.cast(product)\n\n # Read the parameter ranges from the input CSV file\n csv_input_filename = get_input_csv_filename(ui)\n if not csv_input_filename:\n return\n output_directory = os.path.dirname(csv_input_filename)\n print('output directory:', output_directory)\n # once-off viewport setup\n set_render_workspace()\n set_camera()\n\n (vars, mins, maxs, steps) = parse_input_csv(csv_input_filename)\n all_values = generate_all_values(vars, mins, maxs, steps)\n combinations = make_combinations(all_values)\n\n # For each parameter combination, update the Fusion model,\n # capture a screenshot, and export the Mujoco model (which includes the\n # STL files.\n # Write a output CSV file descriping each instance\n output_csv_filename = os.path.join(output_directory, 'instances.csv')\n with open(output_csv_filename, 'w', newline='\\n') as output_csv_file:\n output_csv_writer = csv.writer(output_csv_file)\n for idx, combination in enumerate(combinations):\n # CSV Header. 
Combination have the same sequence\n # of variable names\n if (idx == 0):\n header = list(map(lambda x: x[0], combination))\n output_csv_writer.writerow(header)\n output_csv_writer.writerow(map(lambda x: x[1], combination))\n instance_name = str(idx)\n instance_output_directory = os.path.join(output_directory, instance_name)\n try:\n os.mkdir(instance_output_directory)\n os.mkdir(os.path.join(instance_output_directory, 'stl'))\n except FileExistsError as e:\n print('overwriting existing STLs for instance: {}'.format(instance_name))\n for var, value in combination:\n param = design.userParameters.itemByName(var)\n param.expression = '{}mm'.format(value)\n capture_screenshot(instance_output_directory, instance_name)\n write_stls_and_mojoco_xml(design, instance_output_directory, instance_name)\n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n else:\n print('Failed:\\n{}'.format(traceback.format_exc()))\n","repo_name":"bjnortier/fusion-mujoco-py","sub_path":"fusion_script_generate_cohort/generate_cohort.py","file_name":"generate_cohort.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
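make_combinations is imported from the project's own fusion_mujoco_py package and is not shown here; judging by how its result is consumed above (one [(name, value), ...] list per instance), a plausible equivalent is a cartesian product:

import itertools

def make_combinations(all_values):
    # all_values is a list of (name, [values]) pairs, as built by generate_all_values
    names = [name for name, _ in all_values]
    for combo in itertools.product(*(values for _, values in all_values)):
        yield list(zip(names, combo))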
+{"seq_id":"24353662853","text":"#jaffer razavi\n#oct 20,2019\n#circles increasing in radius by three each time\n\nimport turtle\n\nbob = turtle.Turtle()\n\nbob.speed(0)\n\nfor i in range (0,300,3):\n bob.circle(10+i)\n","repo_name":"J-992/Reference-Work","sub_path":"Coding/CountedLoopExercises/Count9.py","file_name":"Count9.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"3860505622","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom django.db.models import F\n\nfrom rest_framework import viewsets\nfrom apps.users import serializers\nfrom apps.users.views import GeneralApiView\nfrom .models import Author, Book\nfrom django.http import Http404\nfrom .serializers import AuthorSerializer, BookSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\n# from books import serializers\n\nclass AuthorsList(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = [IsAuthenticated]\n \"\"\"View a complete Author list\n\n * Requires: token authentication\n * Any user can access\n \"\"\"\n def get(self, request, format=None):\n authors = Author.objects.all()\n authors_serialized = AuthorSerializer(authors, many=True)\n return Response(authors_serialized.data)\n\n def post(self, request, format=None):\n serializer = AuthorSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass AuthorDetailView(APIView):\n \"\"\"View a Author detail\n\n * Requires: token authentication\n * Any user can access\n \"\"\"\n def get(self, request, pk=None, format=None):\n \n author = Author.objects.get(id=pk)\n author_serialized = AuthorSerializer(author, many=False)\n return Response(author_serialized.data)\n\n def put(self, request, pk=None):\n\n try:\n author = Author.objects.get(id=pk)\n except Author.DoesNotExist:\n author = None\n # raise Http404(\"Poll does not exist\")\n return Response({'message':'Id do no exist'}, status=status.HTTP_404_NOT_FOUND)\n author_serializer = AuthorSerializer(author, data=request.data)\n if author_serializer.is_valid():\n author_serializer.save()\n return Response(author_serializer.data)\n return Response(author_serializer.errors)\n\n\n def delete(self, request, pk=None):\n\n try:\n author = Author.objects.get(id=pk)\n author.delete()\n return Response({'message':'deleted'}, status=status.HTTP_404_NOT_FOUND)\n except Author.DoesNotExist:\n author = None\n return Response({'message':'Id do no exist'}, status=status.HTTP_404_NOT_FOUND)\n \n\nclass BooksListView(GeneralApiView):\n serializer_class = BookSerializer\n\n# this view can be used to list and create\nclass BooksCreateView(generics.ListCreateAPIView):\n serializer_class = BookSerializer \n queryset = BookSerializer.Meta.model.objects.all()\n # intercept the post before response\n def post(self, request):\n serializer = self.serializer_class(data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({'message': 'done'})\n return Response(serializer.errors)\n\nclass BooksRetrieveApiView(generics.RetrieveAPIView):\n serializer_class = BookSerializer \n def get_queryset(self):\n return self.get_serializer().Meta.model.objects.all()\n\n # using manual get overwriting get method, its just alternative\n # def get(self, request, pk):\n # resp = self.get_queryset().get(id=pk)\n # resp = self.serializer_class(resp).data\n # return Response(resp)\n\nclass BooksDeleteApiView(generics.DestroyAPIView):\n serializer_class = BookSerializer \n def get_queryset(self):\n return self.get_serializer().Meta.model.objects.all()\n \n # in this case 
only deactivate a book this could be mean book desapear for user but not from DB\n def delete(self, request, pk=None): \n book = self.get_queryset().filter(id=pk).first()\n if book:\n book.is_available = False\n book.save()\n return Response({'message': 'Deactivated'}, status=status.HTTP_200_OK)\n return Response({'message': 'Product not found'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass BookUpdateApiView(generics.UpdateAPIView):\n serializer_class = BookSerializer \n def get_queryset(self):\n return self.get_serializer().Meta.model.objects.all()\n# class BooksListView(APIView):\n \n# def get(self, request):\n# books = Book.objects.all()\n# books = books.values(\n# 'id',\n# 'author__full_name',\n# author_name=F('author__full_name')\n# )\n# return Response(books)\n\nclass BookViewSet(viewsets.ModelViewSet):\n serializer_class = BookSerializer\n queryset = BookSerializer.Meta.model.objects.all()","repo_name":"dairof7/library","sub_path":"apps/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
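Hypothetical URL wiring for the views above (the module path and URL patterns are assumptions; only the view names come from the file): explicit paths for the APIView/generics classes plus a DefaultRouter for the ModelViewSet.

from django.urls import path, include
from rest_framework.routers import DefaultRouter
from apps.books import views

router = DefaultRouter()
router.register(r'books-vs', views.BookViewSet, basename='book')

urlpatterns = [
    path('authors/', views.AuthorsList.as_view()),
    path('authors/<int:pk>/', views.AuthorDetailView.as_view()),
    path('books/', views.BooksCreateView.as_view()),
    path('books/<int:pk>/', views.BooksRetrieveApiView.as_view()),
    path('', include(router.urls)),
]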
+{"seq_id":"13179292395","text":"import pickle\nimport os,re,json\nimport snownlp,jieba,xpinyin\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\ntemppinyin=xpinyin.Pinyin()\n\nindex_vo=['b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j', 'q', 'x', 'zh', 'ch', 'sh', 'r', 'z', 'c', 's', 'y', 'w', '']\nindex_co=['a', 'o', 'e', 'i', 'u', 'v', 'ai', 'ei', 'ui', 'ao', 'ou', 'uo', 'ia', 'iu', 'ie', 've', 'er', \\\n 'an', 'en', 'in', 'un', 'vn', 'ang', 'eng', 'ing', 'ong', 'ian', 'iang', 'iao', 'iong', 'ua', 'uan', 'uai', 'uang','ue']\n\nhuge_qa_character={}\nwords_qa_character={}\nviterbi_qa_character={}\nvisit_qa_character={}\n\ndef doublevocheck(temp_str):\n if len(temp_str)<=1:\n return False\n if temp_str[1]=='h' and (temp_str[0]=='z' or temp_str[0]=='s' or temp_str[0]=='c'):\n return True\n return False\n\ndef novocheck(temp_str):\n if temp_str[0]=='a' or temp_str[0]=='e' or temp_str[0]=='i' or temp_str[0]=='o' or temp_str[0]=='u':\n return True\n return False\n\ndef read_chr():\n global huge_qa_character,words_qa_character,viterbi_qa_character,visit_qa_character\n if os.path.exists('huge_qa_chara.pkl')==True:\n with open('huge_qa_chara.pkl','rb') as pfile:\n huge_qa_character=pickle.load(pfile)\n \n if os.path.exists('huge_qa_words.pkl')==True:\n with open('huge_qa_words.pkl','rb') as qfile:\n words_qa_character=pickle.load(qfile)\n \n if os.path.exists('viterbi_qa_words.pkl')==True:\n with open('viterbi_qa_words.pkl','rb') as rfile:\n viterbi_qa_character=pickle.load(rfile) \n \n if os.path.exists('visit_qa_words.pkl')==True:\n with open('visit_qa_words.pkl','rb') as sfile:\n visit_qa_character=pickle.load(sfile) \n\ndef addkey(characters):\n if temppinyin.get_pinyin(characters)==characters:\n return\n global vo_idx,co_idx,huge_qa_character\n now_pinyin=temppinyin.get_pinyin(characters)\n if now_pinyin!=characters:\n if doublevocheck(now_pinyin)==True:\n vo_idx=now_pinyin[0:2]\n co_idx=now_pinyin[2:]\n elif novocheck(now_pinyin)==True:\n vo_idx=''\n co_idx=now_pinyin\n else:\n vo_idx=now_pinyin[0]\n co_idx=now_pinyin[1:]\n # print(str(vo_idx)+\" \"+str(co_idx))\n try:\n vo_idx_num=index_vo.index(vo_idx)\n co_idx_num=index_co.index(co_idx)\n except:\n return\n huge_key=(vo_idx_num,co_idx_num)\n if huge_key not in huge_qa_character:\n huge_qa_character[huge_key]={}\n goal_dict=huge_qa_character[huge_key]\n chara_ord=ord(characters)\n if chara_ord not in goal_dict:\n goal_dict[0]=1\n goal_dict[chara_ord]=1\n else:\n goal_dict[0]+=1\n goal_dict[chara_ord]+=1\n return\n\ndef wordsaddkey(characters):\n global temppinyin,words_qa_character\n for ele in characters:\n if temppinyin.get_pinyin(ele)==ele:\n return\n now_pinyin=temppinyin.get_pinyin(characters,splitter=\"\")\n # print(now_pinyin)\n if now_pinyin not in words_qa_character:\n words_qa_character[now_pinyin]={}\n goal_dict= words_qa_character[now_pinyin]\n second_key=[]\n for ech in characters:\n second_key.append(ord(ech))\n second_key=tuple(second_key)\n if second_key not in goal_dict:\n goal_dict[0]=1\n goal_dict[second_key]=1\n else:\n goal_dict[0]+=1\n goal_dict[second_key]+=1\n return\n\ndef viterbi_dictionary(characters:str,next_str:str or None,if_first=False):\n global vo_idx,co_idx,viterbi_qa_character\n if temppinyin.get_pinyin(characters)==characters:\n return\n if next_str!=None and temppinyin.get_pinyin(next_str)==next_str:\n return\n try:\n now_pinyin=temppinyin.get_pinyin(characters,splitter=\"\")\n if now_pinyin not in viterbi_qa_character:\n 
viterbi_qa_character[now_pinyin]={}\n chrpinyin_dict=viterbi_qa_character[now_pinyin]\n secondkey=ord(characters)\n if secondkey not in chrpinyin_dict:\n chrpinyin_dict[secondkey]={0:0,1:0,2:0}\n goal_dict=chrpinyin_dict[secondkey]\n goal_dict[0]+=1\n if if_first==True:\n goal_dict[1]+=1\n if next_str==None:\n goal_dict[2]+=1\n next_ord=ord(next_str)\n if next_ord not in goal_dict:\n goal_dict[next_ord]=0\n goal_dict[next_ord]+=1\n return\n except:\n return\n \n'''Read the Files'''\nread_chr()\n\n\nqa_list=[]\ncnt=-1\nwith open('baike_qa_train.json',encoding='utf-8') as jsonf:\n for lines in jsonf.readlines():\n cnt+=1\n qa_list.append(json.loads(lines))\n if cnt%100==0:\n print(cnt)\n if cnt not in visit_qa_character:\n visit_qa_character[cnt]=1\n else:\n continue\n temp_strlist=qa_list[cnt]\n title_str=temp_strlist['title']\n desc_str=temp_strlist['desc']\n answer_str=temp_strlist['answer']\n \n title_text=re.split(\"\\n\",title_str)\n for eachtext in title_text:\n sentences=re.split(\",|。|?|!|\\?|\\.|\\!\",eachtext)\n # print(eachtext)\n for characters in eachtext: \n addkey(characters)\n # pass\n for each_sentences in sentences: #用句号/分号拆成的分句\n temp_segment=list(jieba.cut(each_sentences))\n # print(temp_segment)\n for ele in temp_segment:\n wordsaddkey(ele)\n \n textlen=len(eachtext)\n for k in range(textlen):\n if k!=textlen-1:\n viterbi_dictionary(eachtext[k],eachtext[k+1],k==0)\n else:\n viterbi_dictionary(eachtext[k],None,k==0)\n \n \n desc_text=re.split(\"\\n\",desc_str)\n for eachtext in desc_text:\n sentences=re.split(\",|。|?|!|\\?|\\.|\\!\",eachtext)\n # print(eachtext)\n for characters in eachtext: \n addkey(characters)\n # pass\n for each_sentences in sentences: #用句号/分号拆成的分句\n temp_segment=list(jieba.cut(each_sentences))\n # print(temp_segment)\n for ele in temp_segment:\n wordsaddkey(ele)\n \n textlen=len(eachtext)\n for k in range(textlen):\n if k!=textlen-1:\n viterbi_dictionary(eachtext[k],eachtext[k+1],k==0)\n else:\n viterbi_dictionary(eachtext[k],None,k==0)\n \n \n answer_text=re.split(\"\\n\",answer_str)\n for eachtext in answer_text:\n sentences=re.split(\",|。|?|!|\\?|\\.|\\!\",eachtext)\n # print(eachtext)\n for characters in eachtext: \n addkey(characters)\n # pass\n for each_sentences in sentences: #用句号/分号拆成的分句\n temp_segment=list(jieba.cut(each_sentences))\n # print(temp_segment)\n for ele in temp_segment:\n wordsaddkey(ele)\n \n textlen=len(eachtext)\n for k in range(textlen):\n if k!=textlen-1:\n viterbi_dictionary(eachtext[k],eachtext[k+1],k==0)\n else:\n viterbi_dictionary(eachtext[k],None,k==0) \n \n fileopener=open(\"huge_qa_chara.pkl\",'wb')\n pickle.dump(huge_qa_character,fileopener)\n fileopener.close()\n\n fileopener2=open(\"huge_qa_words.pkl\",'wb')\n pickle.dump(words_qa_character,fileopener2)\n fileopener2.close()\n \n fileopener3=open(\"viterbi_qa_words.pkl\",'wb')\n pickle.dump(viterbi_qa_character,fileopener3)\n fileopener3.close()\n \n fileopener4=open(\"visit_qa_words.pkl\",'wb')\n pickle.dump(visit_qa_character,fileopener4) \n fileopener4.close()\n if cnt%10000==9999:\n fileopener=open(\"huge_qa_chara\"+str(cnt)+\".pkl\",'wb')\n pickle.dump(huge_qa_character,fileopener)\n fileopener.close()\n\n fileopener2=open(\"huge_qa_words\"+str(cnt)+\".pkl\",'wb')\n pickle.dump(words_qa_character,fileopener2)\n fileopener2.close()\n \n fileopener3=open(\"viterbi_qa_words\"+str(cnt)+\".pkl\",'wb')\n pickle.dump(viterbi_qa_character,fileopener3)\n fileopener3.close()\n \n fileopener4=open(\"visit_qa_words\"+str(cnt)+\".pkl\",'wb')\n 
pickle.dump(visit_qa_character,fileopener4) \n fileopener4.close()\n ","repo_name":"paperplane03/NAI_inputter","sub_path":"Training/forum_open.py","file_name":"forum_open.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
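Hypothetical consumer of the pickles written above (the key values are worked out from the script's index_vo/index_co tables; slot 0 of each bucket holds the bucket's total count):

import pickle

with open('huge_qa_chara.pkl', 'rb') as f:
    huge = pickle.load(f)

key = (10, 30)  # index_vo.index('h'), index_co.index('ua') -> pinyin "hua"
bucket = huge.get(key, {})
best = sorted((o for o in bucket if o != 0), key=bucket.get, reverse=True)
print([chr(o) for o in best[:5]])  # the most frequent characters pronounced "hua"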
+{"seq_id":"32650596627","text":"\"\"\"\nAfficher la table de multiplication d'un nombre\non continue avec la bouche for, cette fois-ci pour afficher la table de multiplication\nd'un nombre.\n\"\"\"\nnombre = 7\n\nfor i in range(11):\n print(f\"{i} x {nombre} = {i * nombre}\")\n\n\"\"\"\n XPLICATION\n\nIci, nous faisons tous les calculs nécessaires directement à l'intérieur de la méthode format.\n\nPour commencer, nous bouclons à travers une liste contenant les nombres de 0 à 10, grâce à la fonction range :\n\n for i in range(11):\n\nNous affichons ensuite dans la chaîne de caractère formattée, le nombre courant de la boucle, contenu dans la\nvariable i, le nombre pour lequel nous affichons la table de multiplication, contenu dans la variable nombre,\npuis la multiplication de l'un par l'autre (i * nombre).\n\n POINTS IMPORTANTS À RETENIR\n\n Il est possible de faire des opérations mathématiques directement à l'intérieur de la méthode format,\n afin d'insérer le résultat de ces opérations à l'intérieur d'une chaîne de caractère.\n\"\"\"","repo_name":"hollerith47/101_py_exercises","sub_path":"1.Debutant/43.py","file_name":"43.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"39942506058","text":"from django.shortcuts import render\nimport requests\nimport datetime\n\n# Create your views here.\ndef index (request):\n\n if 'city' in request.POST:\n city=request.POST['city']\n else:\n city='Delhi'\n\n\n\n appid='94b7a5f9be5713db1341cb08ac45a003'\n URL='https://api.openweathermap.org/data/2.5/weather'\n PARAMS={'q':city,'appid':appid,'units':'metric'}\n r=requests.get(url=URL,params=PARAMS)\n res=r.json()\n print(res)\n code = res[\"cod\"]\n if code == \"404\":\n description=\"Enter Valid City Name\"\n icon=\"\"\n temp=\"\"\n day=\"\"\n else:\n description = res[\"weather\"][0]['description']\n icon=res[\"weather\"][0]['icon']\n temp=res['main']['temp']\n day=datetime.date.today()\n\n\n\n return render(request,'weatherapp/index.html',{\n 'description':description,'icon':icon,'temp':temp,'day':day,'city':city, 'code':code})","repo_name":"NandanRavi/weatherapp","sub_path":"weatherapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"40444554043","text":"# Ноль или не ноль\n# Проверьте, есть ли среди данных N чисел нули.\n#\n# Входные данные\n#\n# Вводится число N, а затем N чисел.\n#\n# Выходные данные\n#\n# Выведите YES, если среди введенных чисел есть хотя бы один нуль, или NO в противном случае.\n#\n# Sample Input:\n#\n# 3\n# 1\n# 0\n# 2\n# Sample Output:\n#\n# YES\n\nn = int(input());\n\nsum = 0;\n\nfor i in range(0, n):\n a = int(input());\n if a == 0:\n sum +=1;\nif sum > 0:\n print('YES');\nelse:\n print('NO');","repo_name":"andrei-barinov/programming_tasks","sub_path":"Задачи по программированию/Оператор for/Ноль или не ноль.py","file_name":"Ноль или не ноль.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"4021243883","text":"import datetime\r\n\r\nclass Msg_User():\r\n user_details = []\r\n messages = []\r\n common_msg = \"Hi {name},\\nThank you for the purchase on {date}.\\nWe hope you are excited about using the product.\\nJust as a remainder the purchase total was ${amount}.\\nHave a great day.\\n\\n Team CFE\"\r\n def add_usr(self,usr_name,usr_bill):\r\n today = datetime.date.today()\r\n todays_date = '{today.day}/{today.month}/{today.year}'.format(today=today)\r\n name = usr_name.capitalize()\r\n amt = \"%.2f\" %(usr_bill)\r\n detail = {\r\n \"name\": name, \r\n \"amt\" : amt\r\n }\r\n detail[\"date\"] = todays_date\r\n self.user_details.append(detail)\r\n \r\n def make_msgs(self):\r\n if len(self.user_details) > 0 :\r\n for detail in self.user_details :\r\n name = detail[\"name\"]\r\n amount = detail[\"amt\"]\r\n date = detail[\"date\"]\r\n message = self.common_msg\r\n new_msg = message.format(name=name,amount=amount,date=date)\r\n self.messages.append(new_msg)\r\n return self.messages\r\n \r\n\r\nobj = Msg_User()\r\nobj.add_usr('John',100)\r\nobj.add_usr('jIm',80)\r\nobj.add_usr('MiKE',75)\r\nobj.add_usr('cena',50)\r\nobj.add_usr('maT',100)\r\nprint(obj.user_details)\r\nmademsgs = obj.make_msgs()\r\nprint(mademsgs)","repo_name":"navinng/Python","sub_path":"Billing Message.py","file_name":"Billing Message.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"40017110518","text":"# Approach 1: Maintain Array Sum\n\n# Intuition and Algorithm:\n# Let's try to maintain S, the sum of the array throughout one query operation.\n# When acting on an array element nums[index], the rest of the values of nums remain the same. Let's remove \n# nums[index] from S (if it is even), then add nums[index] + val back (if it is even.)\n\nfrom typing import List\n\nclass Solution:\n def sumEvenAfterQueries(self, nums: List[int], queries: List[List[int]]) -> List[int]:\n S = sum( x for x in nums if x % 2 == 0)\n ans = []\n\n for val, idx in queries:\n if nums[idx] % 2 == 0:\n S -= nums[idx] \n nums[idx] += val\n\n if nums[idx] % 2 == 0:\n S += nums[idx]\n ans.append(S)\n \n return ans\n \n\nnums = [1,2,3,4]\nqueries = [[1,0],[-3,1],[-4,0],[2,3]]\nobj = Solution()\nprint(obj.sumEvenAfterQueries(nums, queries))\n\n\n# Complexity Analysis:\n# Time Complexity: O(N + Q), where N is the length of nums and Q is the number of queries.\n# Space complexity : O(Q), though we only allocate O(1) additional space.","repo_name":"nitishprabhu26/LeetcodeSolutions","sub_path":"0985_sum_of_even_numbers_after_queries/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"}
+{"seq_id":"70729913036","text":"from collections import defaultdict\nfrom sys import argv\n\n\n# class Registers inherit from defaultdict\nclass Registers(defaultdict):\n def __init__(self):\n super(Registers, self).__init__(lambda: 0)\n\n def apply(self, instruction):\n if instruction.is_valid(self):\n self[instruction.adress] += instruction.sign * instruction.value\n\n\nclass Instruction:\n def __init__(self, adress, signString, value, condition):\n self.adress = adress\n self.sign = 1 if signString == \"inc\" else -1\n self.value = value\n self.condition = condition\n\n def is_valid(self, registers):\n return (self.condition).is_valid(registers)\n\n\nclass Condition:\n def __init__(self, adress, operand, rhs):\n self.adress = adress\n self.operand = operand\n self.rhs = rhs\n\n def is_valid(self, registers):\n if self.operand == \">\":\n return registers[self.adress] > self.rhs\n elif self.operand == \">=\":\n return registers[self.adress] >= self.rhs\n elif self.operand == \"<\":\n return registers[self.adress] < self.rhs\n elif self.operand == \"<=\":\n return registers[self.adress] <= self.rhs\n elif self.operand == \"==\":\n return registers[self.adress] == self.rhs\n elif self.operand == \"!=\":\n return registers[self.adress] != self.rhs\n else:\n print(\"Invalid operand \", self.operand)\n exit()\n\n\nif __name__ == '__main__':\n\n # dict of registers init to 0\n registers = Registers()\n\n # list of instructions\n instructions = []\n\n input_data = open(argv[1], 'r')\n for line in input_data:\n # 1. Finding condition\n adress = line.split()[-3]\n operand = line.split()[-2]\n rhs = int(line.rstrip('\\n').split()[-1])\n condition = Condition(adress, operand, rhs)\n\n # 2. Creating instruction\n adress = line.split()[0]\n signString = line.split()[1]\n value = int(line.split()[2])\n instructions.append(Instruction(adress, signString, value, condition))\n\n input_data.close()\n\n max_val = 0\n for instr in instructions:\n registers.apply(instr)\n max_val = max([max_val] + list(registers.values()))\n print(\"Part 1 : \", max(registers.values()))\n print(\"Part 2 : \", max_val)\n","repo_name":"Blaxav/Challenges-algo","sub_path":"AOC2018/Xavier/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"9804838029","text":"def bank(x, y):\n for each_year in range(y):\n x = round(x * 1.1)\n return round(x, 2)\n\nx = int(input(\"Сумма вклада? \"))\ny = int(input(\"На сколько лет? \"))\n\nprint(\"Вы получите: \", bank(x, y))\n\n","repo_name":"shinki90/homework","sub_path":"leson_2_homework/lesson_2_task_10.py","file_name":"lesson_2_task_10.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"70629583754","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 11 15:38:02 2023\n\n@author: adityaswarup\n\"\"\"\n\nimport cdsapi\nimport os\n\npath = 'ENTER YOUR WORKING DIRECTORY HERE' # Please verify where your files are being downloaded.\n\n\ndef data_download(variables, start_year, end_year, path):\n \n '''\n Please refer to the following documentation for ERA5-Land\n https://confluence.ecmwf.int/display/CKB/ERA5-Land%3A+data+documentation#ERA5Land:datadocumentation-Dataformat\n \n To get the api request, enter the relevant details in the following CDS\n webpage:\n https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-land?tab=form\n \n Please ensure that you have a valid login.\n \n Definition\n ----------\n data_download(variables, start_year, end_year)\n\n Input Format Description\n ----- ----- -----------\n variables List of Str A list of variables as outlined by ECMWF \n ERA5Land documentation.\n \n start_year Int The year from which you want to start\n data downloads.\n \n end_year Int The year from which you want to end \n data downloads.\n \n path Str The folder in which your data will be \n downloaded.\n \n \n Output Format Description\n ----- ----- -----------\n netcdf.zip file Multiple files are outputted. The number \n of files outputted is related to your \n variables, start_year, and end_year. The \n files will be named by variable and year.\n \n Message Str Checks in your specified directory whether \n or not your file has been downloaded. \n \n Example\n --------\n >>> variables = [\"total_precipitation\", \"2m_temperature\"]\n \n >>> data_download(variables, 2015, 2023)\n \n total_precipitation_2015.netcdf.zip has been downloaded!\n total_precipitation_2016.netcdf.zip has been downloaded!\n total_precipitation_2017.netcdf.zip has been downloaded!\n total_precipitation_2018.netcdf.zip has been downloaded!\n total_precipitation_2019.netcdf.zip has been downloaded!\n total_precipitation_2020.netcdf.zip has been downloaded!\n total_precipitation_2021.netcdf.zip has been downloaded!\n total_precipitation_2022.netcdf.zip has been downloaded!\n total_precipitation_2023.netcdf.zip has been downloaded!\n 2m_temperature_2015.netcdf.zip has been downloaded!\n 2m_temperature_2016.netcdf.zip has been downloaded!\n 2m_temperature_2017.netcdf.zip has been downloaded!\n 2m_temperature_2018.netcdf.zip has been downloaded!\n 2m_temperature_2019.netcdf.zip has been downloaded!\n 2m_temperature_2020.netcdf.zip has been downloaded!\n 2m_temperature_2021.netcdf.zip has been downloaded!\n 2m_temperature_2022.netcdf.zip has been downloaded!\n 2m_temperature_2023.netcdf.zip has been downloaded!\n \n You will notice the following files downloaded in your specified location:\n total_precipitation_2015.netcdf.zip\n total_precipitation_2016.netcdf.zip\n total_precipitation_2017.netcdf.zip\n total_precipitation_2018.netcdf.zip\n total_precipitation_2019.netcdf.zip\n total_precipitation_2020.netcdf.zip\n total_precipitation_2021.netcdf.zip\n total_precipitation_2022.netcdf.zip\n total_precipitation_2023.netcdf.zip\n 2m_temperature_2015.netcdf.zip\n 2m_temperature_2016.netcdf.zip\n 2m_temperature_2017.netcdf.zip\n 2m_temperature_2018.netcdf.zip\n 2m_temperature_2019.netcdf.zip\n 2m_temperature_2020.netcdf.zip\n 2m_temperature_2021.netcdf.zip\n 2m_temperature_2022.netcdf.zip\n 2m_temperature_2023.netcdf.zip\n \n For more information on this code visit:\n https://github.com/AdityaSwarup/ERA5-Land_data_downloads\n \n History\n -------\n Written 
by - Aditya Swarup Sep 2023\n '''\n \n c = cdsapi.Client()\n \n assert os.path.exists(path), \\\n \"Path unspecified. Please ensure that you have checked where your files will be downloaded. If so, input it in the variable 'path'.\"\n \n \n i = start_year\n \n years = []\n \n while i <= end_year:\n years.append(str(i))\n i += 1\n \n lst_of_files = [] # A list of all your downloaded files should occur here.\n \n for variable in variables: \n for year in years: \n c.retrieve(\n 'reanalysis-era5-land',\n {\n 'variable': [\n variable,\n ],\n 'year': year, \n 'month': ['01','02','03','04','05','06','07','08','09','10','11','12',],\n 'day': [\n '01', '02', '03',\n '04', '05', '06',\n '07', '08', '09',\n '10', '11', '12',\n '13', '14', '15',\n '16', '17', '18',\n '19', '20', '21',\n '22', '23', '24',\n '25', '26', '27',\n '28', '29', '30',\n '31',\n ],\n 'time': [\n '00:00', '01:00', '02:00',\n '03:00', '04:00', '05:00',\n '06:00', '07:00', '08:00',\n '09:00', '10:00', '11:00',\n '12:00', '13:00', '14:00',\n '15:00', '16:00', '17:00',\n '18:00', '19:00', '20:00',\n '21:00', '22:00', '23:00',\n ],\n 'area': [\n 30, -20, 27,\n -18,\n ],\n 'format': 'netcdf.zip',\n },\n variable + '_' + year + '.netcdf.zip')\n \n lst_of_files.append(variable + '_' + year + '.netcdf.zip')\n \n for file in lst_of_files:\n if os.path.isfile(path + '/' + file):\n print(file + \" has been downloaded!\")\n \n else:\n print('There has been an issue with' + file + \n \". Please check your path folder and potentially redownload the file.\")\n \n\n\n\n# Testing!! \n###############################################################################\n\n# Test 1 (Raising an AssertionError due to path not specified.): \n# lst_of_variables = ['10m_u_component_of_wind', '10m_v_component_of_wind'] \n# path_unspecified = 'testing'\n# data_download(lst_of_variables, 2020, 2023, path_unspecified) #Should return AssertionError\n\n# Test 2 (All files have been downloaded)\n# lst_of_variables = ['10m_u_component_of_wind', '10m_v_component_of_wind'] \n# path_correct = '/Users/adityaswarup/.spyder-py3/ERA5 test'\n# data_download(lst_of_variables, 2020, 2023, path_correct) #Should download all files + print messages saying it has been downloaded.\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"AdityaSwarup/ERA5-Land_data_downloads","sub_path":"ERA5-Land_download_script.py","file_name":"ERA5-Land_download_script.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
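A hedged follow-up for inspecting a downloaded archive: each *.netcdf.zip holds a single NetCDF file, so zipfile plus xarray (assumed installed; file names here are illustrative) can open it. For total_precipitation the variable inside carries ERA5's short name tp.

import zipfile
import xarray as xr

with zipfile.ZipFile('total_precipitation_2020.netcdf.zip') as zf:
    zf.extractall('tp_2020')
    inner = zf.namelist()[0]  # usually data.nc

ds = xr.open_dataset('tp_2020/' + inner)
print(ds)  # dimensions time/latitude/longitude, variable tp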
+{"seq_id":"13373663887","text":"import pandas as pd\nfrom gensim.models.word2vec import Word2Vec\nimport numpy as np\nfrom nlpops import Char2ID\n\ncorpus1 = ['帮我-关掉-借呗-可以-吗', '蚂蚁-借呗-提前-还-了-分期,到期-后-还要-还-这期-吗']\ncorpus2 = ['天猫店-都-能用-花呗-吗', '我-用-花呗-买东西,隔天-就-还款-了。为什么-现在-又-扣款']\ncorpus3 = ['天猫店-都-能用-花呗-吗', '我-用-花呗-买东西,隔天-就-还款-了。为什么-现在-又-扣款']\n\ncorpus1c = ['帮我关掉借呗可以吗', '蚂蚁借呗提前还了分期,到期后还要还这期吗']\ncorpus2c = ['天猫店都能用花呗吗', '我用花呗买东西,隔天就还款了。为什么现在又扣款']\ncorpus3c = ['天猫店都能用花呗吗', '我用花呗买东西,隔天就还款了。为什么现在又扣款']\n\nchar2id = Char2ID.load(r'data/char2id_test.txt').char2id_dict\n\n\"\"\"\nw2v_mod_sents = [x.split('-') for x in corpus1] + [x.split('-') for x in corpus2]\nw2v_mod = Word2Vec(sentences=w2v_mod_sents, size=256, window=3, min_count=1)\nw2v_mod.save(r'data/w2v_test_mod')\n\"\"\"\n\nw2v_mod = Word2Vec.load(r'data/w2v_test_mod')\n\ntrain = pd.DataFrame({'q1_cut': [corpus1[0]],\n 'q2_cut': [corpus1[1]],\n 'q1_chars': [[char2id[x] for x in corpus1c[0]]],\n 'q2_chars': [[char2id[x] for x in corpus1c[1]]],\n 'flag': [0]})\n\nvalid = pd.DataFrame({'q1_cut': [corpus2[0]],\n 'q2_cut': [corpus2[1]],\n 'q1_chars': [[char2id[x] for x in corpus2c[0]]],\n 'q2_chars': [[char2id[x] for x in corpus2c[1]]],\n 'flag': [0]})\n\ntest = pd.DataFrame({'q1_cut': [corpus3[0]],\n 'q2_cut': [corpus3[1]],\n 'q1_chars': [[char2id[x] for x in corpus2c[0]]],\n 'q2_chars': [[char2id[x] for x in corpus2c[1]]],\n 'flag': [0]})\n\ndata = [train, valid, test]\ncols = ['q1_cut', 'q2_cut']\n\nfor p_data in data:\n for col in cols:\n p_data[col] = p_data[col].apply(lambda x: x.split('-'))\n p_data[col] = p_data[col].apply(lambda x: np.array([w2v_mod[z] for z in x]))\n\n# Train shape: [length_of_train, 30, 256]\ntrain_input = [np.zeros((len(train), 30, 256)) for _ in range(2)] + [np.zeros((len(train), 30)) for _ in range(2)]\n\nfor index, row in train.iterrows():\n uw = row['q1_cut'][0:30]\n vw = row['q2_cut'][0:30]\n uc = row['q1_chars'][0:30]\n vc = row['q2_chars'][0:30]\n train_input[0][index][0:len(uw)] = uw\n train_input[1][index][0:len(vw)] = vw\n train_input[2][index][0:len(uc)] = uc\n train_input[3][index][0:len(vc)] = vc\n\nfor x in train_input:\n print(x)\n","repo_name":"BridgeMia/Web-learning-of-Mia","sub_path":"new_process.py","file_name":"new_process.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"25909429648","text":"from pyramid import config as c\nfrom pyramid.view import (view_config,\n forbidden_view_config,\n notfound_view_config)\nfrom pyramid.httpexceptions import (\n HTTPFound,\n HTTPForbidden,\n HTTPNotFound,\n HTTPUnauthorized\n )\nfrom pyramid.security import remember, forget, authenticated_userid\nfrom pyramid.events import subscriber, BeforeRender\n\nfrom sqlalchemy.exc import DBAPIError\nfrom sqlalchemy.orm.exc import NoResultFound\nimport transaction\n\nimport markdown\nfrom wtforms import Form\n\nfrom passlib.hash import bcrypt\n\nfrom models.tables import (\n DBSession,\n Posts,\n Users,\n Categories\n )\n\nfrom models.forms import (\n LoginForm, \n UserForm, \n EditUserForm, \n PostForm,\n CategoryForm\n )\n\nfrom .security import verify_password\n\n@subscriber(BeforeRender)\ndef add_globals(event):\n \"\"\"Store the username in the application registry. This should probably use\n a session instead.\n \"\"\"\n\n username = authenticated_userid(event['request'])\n if username:\n c.user = DBSession.query(Users)\\\n .filter(Users.name == username).first()\n else:\n c.user = None\n event['c'] = c\n\n\n# This could be a generalized page-viewer. This would probably entail moving\n# some Python to the template, which is kind of icky. I'll have to play with\n# this.\n@view_config(route_name='home', renderer='home.mako')\ndef home(request):\n \"\"\"Get the posts for /home.\"\"\"\n\n posts = DBSession.query(Posts, Users, Categories)\\\n .join(Users, Posts.authorid==Users.id)\\\n .join(Categories, Posts.categoryid==Categories.id)\n posts = posts.order_by(Posts.id.desc()).all()\n \n return {'posts': posts}\n\n@view_config(route_name='login', renderer='login.mako')\n@forbidden_view_config(renderer='login.mako')\ndef login(request):\n \"\"\"If the user came from this page and has submitted the form, try to log\n them in. Otherwise, give them the login page.\n \"\"\"\n\n login_url = request.route_url('login')\n referrer = request.url\n if login_url == referrer:\n referrer = '/'\n\n form = LoginForm(request.POST)\n if form.came_from.data:\n came_from = form.came_from.data\n else:\n came_from = referrer\n\n if request.POST and form.validate():\n username = form.username.data\n password = form.password.data\n if verify_password(username, password):\n headers = remember(request, username)\n return HTTPFound(location=came_from, headers=headers)\n else:\n message = 'Incorrect login information.'\n return {'message': message,\n 'username': username,\n 'form': form,\n 'came_from': came_from,\n 'url': request.route_url('login')\n }\n else:\n return {'message': '',\n 'username': '',\n 'form': form,\n 'came_from': came_from,\n 'url': request.route_url('login')\n }\n\n@view_config(route_name='logout')\ndef logout(request):\n \"\"\"View for /logout, which ends the user's session.\"\"\"\n\n headers = forget(request)\n return HTTPFound(location=request.route_url('home'), headers=headers)\n\n@view_config(route_name='new_post', renderer='new_post.mako',\n permission='edit')\ndef new_post(request):\n \"\"\"View for /post/new. If the post has already been submitted, try to add it\n to the database and display a success message. 
Otherwise, show the new post\n form.\n \"\"\"\n\n form = PostForm(request.POST)\n post_content = None\n if request.POST and form.validate():\n title = form.title.data\n category = form.category.data\n post_content = form.post_content.data\n \n users_db = DBSession.query(Users)\n author = users_db.filter(Users.name == authenticated_userid(request))\n \n new_post = Posts(title=title,\n authorid=author.first().id,\n categoryid=category,\n post=post_content)\n \n with transaction.manager:\n DBSession.add(new_post)\n \n return {'form': form, \n 'post': post_content} #this should be run through markdown renderer\n\n@view_config(route_name='edit_post', renderer='edit_post.mako',\n permission='edit')\ndef edit_post(request):\n \"\"\"View for /post/edit. If the form's already been submitted, change it in\n the database. Otherwise, show the edit post form.\n \"\"\"\n\n post_id = request.matchdict['pid']\n post = DBSession.query(Posts).filter(Posts.id == post_id).first()\n form = PostForm(request.POST)\n if request.POST and form.validate():\n title = form.title.data\n category = form.category.data\n post_content = form.post_content.data\n\n post.title = title\n post.post = post_content\n post.categoryid = category\n with transaction.manager:\n DBSession.add(post)\n return HTTPFound(location=request.route_url('view_post', pid=post_id))\n else:\n return {'form': form,\n 'post': post}\n\n### wtforms probably wouldn't hurt here either.\n@view_config(route_name='delete_post', renderer='delete_post.mako',\n permission='edit')\ndef delete_post(request):\n \"\"\"View for /post/delete. If the user has already confirmed that they want\n to delete the post, delete it. Otherwise, ask them if they're sure.\n \"\"\"\n\n post_id = request.matchdict['pid']\n post = DBSession.query(Posts).filter(Posts.id == post_id).first()\n if 'submitted' in request.params:\n with transaction.manager:\n DBSession.delete(post)\n return {'url': request.route_url('home'),\n 'post': None,\n 'message': 'Post successfully deleted.'}\n else:\n if 'no_delete' in request.params:\n url = request.route_url('home')\n post = None\n message = 'The post was not deleted.'\n else:\n url = request.route_url('delete_post', pid=post_id)\n message = 'Are you sure you want to delete this post?'\n return {'url': url,\n 'post': post,\n 'message': message}\n\n@view_config(route_name='view_post', renderer='view_post.mako')\ndef view_post(request):\n \"\"\"View for /post/view. Display a single post, with its comments thread.\"\"\"\n\n post_id = request.matchdict['pid']\n post = DBSession.query(Posts).filter(Posts.id == post_id).one()\n if post is None:\n message = 'The post you requested does not exist.'\n else:\n message = False\n return {'message': message, 'post': post}\n\n@view_config(route_name='new_category', renderer='new_category.mako',\n permission='admin')\ndef new_category(request):\n \"\"\"View for /category/new. Adds a category.\"\"\"\n\n form = CategoryForm(request.POST)\n if request.POST and form.validate():\n name = form.name.data\n new_category = Categories(name)\n with transaction.manager:\n DBSession.add(new_category)\n return HTTPFound(location=request.route_url('home'))\n else:\n return {'form': form}\n\n@view_config(route_name='edit_category', renderer='edit_category.mako',\n permission='admin')\ndef edit_category(request):\n \"\"\"View for /category/edit. 
Edit or delete a category.\"\"\"\n\n category_id = request.matchdict['cid']\n category = DBSession.query(Categories).filter(\\\n Categories.id == category_id).first()\n form = EditCategoryForm(request.POST)\n if request.POST and form.validate():\n if form.delete.data:\n with transaction.manager:\n DBSession.delete(category) # delete the row; Session.remove would only discard the scoped session\n else:\n category.name = form.name.data\n with transaction.manager:\n DBSession.add(category)\n return HTTPFound(location=request.route_url('home'))\n else:\n return {'form': form,\n 'category': category}\n\n# When there are no posts in the category, this page just tells you so. I think\n# it would be sensible to offer to delete the category. This would suggest some\n# kind of utility function, because it would duplicate some functionality in\n# edit_category.\n@view_config(route_name='view_category', renderer='home.mako')\ndef view_category(request):\n \"\"\"View for /category/view. Display the posts in a single category.\"\"\"\n\n category_id = request.matchdict['cid']\n posts = DBSession.query(Posts, Users, Categories)\n posts = posts.filter(Posts.categoryid == category_id)\n posts = posts.filter(Posts.authorid == Users.id)\n posts = posts.filter(Posts.categoryid == Categories.id)\n posts = posts.order_by(Posts.id.desc()).all()\n return {'posts': posts}\n\n@view_config(route_name='new_user', renderer='new_user.mako', permission='admin')\ndef new_user(request):\n \"\"\"View for /user/new. Add a new user!\"\"\"\n\n form = UserForm(request.POST)\n if request.POST and form.validate():\n password = bcrypt.encrypt(form.password.data)\n user = Users(form.name.data, form.group.data, password)\n DBSession.add(user)\n return HTTPFound(location=request.route_url('home'))\n return {'form': form}\n\n@view_config(route_name='edit_user', renderer='edit_user.mako',\n permission='edit')\ndef edit_user(request):\n \"\"\"View for /user/edit. Edit a user's attributes.\"\"\"\n\n active_user = DBSession.query(Users)\\\n .filter(Users.name == authenticated_userid(request)).one()\n user_id = request.matchdict['uid']\n try:\n user = DBSession.query(Users).filter(Users.id == user_id).one()\n except NoResultFound:\n raise HTTPNotFound()\n if active_user.group != 0 and active_user is not user:\n raise HTTPUnauthorized()\n form = EditUserForm(request.POST)\n if request.POST and form.validate():\n if not form.delete.data:\n password = bcrypt.encrypt(form.password.data)\n user.name = form.name.data\n user.password = password\n DBSession.add(user)\n return HTTPFound(location=request.route_url('view_user', uid=user.id))\n else:\n DBSession.delete(user)\n return HTTPFound(location=request.route_url('home'))\n return {'form': form, 'user': user}\n\n# Since this is a blog, this should probably list their posts.\n@view_config(route_name='view_user', renderer='view_user.mako')\ndef view_user(request):\n \"\"\"View for /user/view. Look at a user's profile.\"\"\"\n\n user_id = request.matchdict['uid']\n try:\n user = DBSession.query(Users)\\\n .filter(Users.id == user_id)\\\n .one()\n posts = DBSession.query(Posts)\\\n .filter(Posts.authorid == user_id)\\\n .all()\n except:\n raise HTTPNotFound()\n \n return {'user': user, 'posts': posts}\n\n@view_config(context=HTTPUnauthorized, renderer='401.mako')\ndef unauthorized(request):\n \"\"\"View for a 401 error. This is mostly for unauthenticated users, but it\n also gets called when somebody doesn't have rights to do something. See\n edit_user. 
That may not be the best way to do it, though.\n \"\"\"\n\n return {}\n\n@notfound_view_config(append_slash=True, renderer='404.mako')\ndef notfound(request):\n \"\"\"View for a 404 error.\"\"\"\n\n return {}\n","repo_name":"zmarvel/coffeespot","sub_path":"coffeespot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
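Hypothetical route configuration implied by the view_config names above (only the route names come from the file; the URL patterns are assumptions):

from pyramid.config import Configurator

def main(global_config, **settings):
    config = Configurator(settings=settings)
    config.add_route('home', '/')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('new_post', '/post/new')
    config.add_route('edit_post', '/post/edit/{pid}')
    config.add_route('delete_post', '/post/delete/{pid}')
    config.add_route('view_post', '/post/view/{pid}')
    config.add_route('new_category', '/category/new')
    config.add_route('edit_category', '/category/edit/{cid}')
    config.add_route('view_category', '/category/view/{cid}')
    config.add_route('new_user', '/user/new')
    config.add_route('edit_user', '/user/edit/{uid}')
    config.add_route('view_user', '/user/view/{uid}')
    config.scan()  # picks up the @view_config decorators
    return config.make_wsgi_app()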
+{"seq_id":"31617405210","text":"from heapq import *\nnumber_test_case = int(input())\ndef solutions():\n n = int(input())\n monsters = [int(num) for num in input().split()]\n heap = []\n for mon in monsters:\n heappush(heap, mon)\n res = 0\n while heap:\n curr = heappop(heap)\n res += 1\n if curr == 1:\n if heap:\n nxt = heappop(heap)\n nxt -= 1\n if nxt != 0:\n heappush(heap, nxt)\n print(res)\n\nfor _ in range(number_test_case):\n solutions()\n","repo_name":"evan-dayy/Decal-Codeforces","sub_path":"03-B.py","file_name":"03-B.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"32215066634","text":"#calcular la distancia entre dos puntos \ni=1\nwhile i <=2:\n x=int(input(\"ingrese el valor del punto x\" + str(i)+ str (\"= \")))\n y=int(input(\"ingrese el valor del punto y\" + str(i)+ str (\"=\")))\n if x > 0 and y > 0:\n if i >0 or i <=2:\n if i==1:\n x1= x\n y1=y\n if i==2:\n x2=x\n y2=y\n i=i+1\n else:\n print(\"ingreso un valor incorrecto, vuelva a intentarlo\")\ndistancia= (((x2-x1)**2) + ((y2-y1)**2))**0.5\nprint(\"la distancia entre los dos puntos es = \", distancia)","repo_name":"Breynerr/taller-30-abril","sub_path":"ejercicio#16.py","file_name":"ejercicio#16.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"35967294000","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nfrom CodernityDB.database import Database\nfrom CodernityDB.hash_index import HashIndex\nfrom ByPlatform.StorageHelper.StorageType import StorageType\n# from StorageType import *\n# from WithXIndex import *\nimport os\nfrom ByPlatform.Base.OutPutHelper import *\n\n\nclass StorageHelper(object):\n '''\n 用于本地存储类,存储采用本地文档模式,存储格式为json格式,类似Mongodb\n '''\n def __init__(self ,log = None,dbname = None):\n self.__dbRoot = \"datas\"\n self.__dbName = dbname\n self.__DBHandle = None\n self.openFlag = False\n self.loggerHandle = log\n\n if self.__dbName:\n self.CreateDB()\n pass\n\n def GetDBHandle(self):\n if not self.__DBHandle.opened:\n self.__DBHandle.open()\n return self.__DBHandle\n\n def StartStorageServce(self):\n '''\n 开启存储服务\n :return:\n '''\n\n # 已经被初始化了,直接返回\n if self.openFlag and self.__DBHandle:\n return True,\"Success\"\n\n if not self.__dbName:\n return False, \"Unset DB Name\"\n\n # 未进行任何初始化\n if not self.__DBHandle:\n result = self.OpenDb()\n if not result or not self.__DBHandle :\n return False ,\"Open LocalDB Failed\"\n\n self.openFlag = True\n\n return True,\"Success\"\n\n def StopStorageServce(self):\n '''\n 关闭存储服务\n :return:\n '''\n # 已经处于关闭状态\n if not self.openFlag and not self.__DBHandle:\n return True\n\n # 句柄有效,但状态不对\n if self.__DBHandle:\n self.__DBHandle.close()\n self.__DBHandle = None\n\n if self.openFlag:\n self.openFlag = False\n\n return True\n\n def GetStorageState(self):\n '''\n 获取设备存储状态\n :return:字典: 当前文件、存储服务开启状态\n '''\n\n if self.__DBHandle:\n return self.__DBHandle.path, self.openFlag\n else:\n return None, False\n\n def __getModel(self):\n return StorageType.getModel(self.__dbName)\n\n def InsertData(self,dictDatas):\n '''\n 写入数据\n :param dictDatas:\n :return:\n '''\n if not self.openFlag:\n return False,\"Write Service Not Open\"\n\n if not self.__DBHandle:\n return False, \"Write Handle Exception\"\n\n # 数据确认与验证\n dictDatas = StorageType.ValidData(self.__getModel(),dictDatas)\n\n try:\n self.__DBHandle.insert(dictDatas)\n return True,\"Success\"\n except Exception as ex:\n OutPutHelper.consolePrint(ex.message)\n\n def UpdateData(self,dictDatas):\n '''\n 写入数据\n :param dictDatas:\n :return:\n '''\n if not self.openFlag:\n return False,\"Write Service Not Open\"\n\n if not self.__DBHandle:\n return False, \"Write Handle Exception\"\n\n # 数据确认与验证\n # dictDatas = StorageType.ValidData(self.__getModel(),dictDatas)\n\n try:\n self.__DBHandle.update(dictDatas)\n return True,\"Success\"\n except Exception as ex:\n OutPutHelper.consolePrint(ex.message)\n\n def CreateDB(self):\n '''\n 创建nosql数据库\n :param dbName:\n :return:\n '''\n\n db = Database(os.path.join(self.__dbRoot,self.__dbName))\n\n if db.exists():\n return True,\"DB Exist\"\n try:\n # OutPutHelper.consolePrint(\"Create DB=%s, dbpath=%s\"% (self.__dbName ,db.create()))\n db.create()\n # if indexname:\n # x_ind = WithXIndex(db.path, indexname)\n # db.add_index(x_ind)\n except Exception as ex:\n return False,\"Create DB Failed\"\n db.close()\n\n return True ,\"Success\"\n pass\n\n\n def OpenDb(self):\n '''\n 打开当前数据库\n :param dbName:数据库名称\n :return:\n '''\n self.__DBHandle = Database(os.path.join(self.__dbRoot,self.__dbName))\n\n self.__DBHandle.open()\n\n if not self.__DBHandle.exists():\n self.__DBHandle = None\n return False,\"DB Not Exist\"\n\n try:\n self.__DBHandle.open()\n except:\n return False,\"Open DB Failed\"\n\n return True,\"Success\"\n\n\n\n def RemoveData(self,data):\n pass\n\n @staticmethod\n def storageDatas(dbname, 
datas):\n storageHandle = StorageHelper(None,dbname)\n storageHandle.StartStorageServce()\n storageHandle.InsertData(datas)\n pass\n","repo_name":"caojiaju-2017/HsPlatform","sub_path":"ByPlatform/StorageHelper/StorageHelper.py","file_name":"StorageHelper.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
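A hedged usage sketch for the helper above (db name and payload are invented; the import path follows the repo's layout). CodernityDB assigns _id/_rev keys on insert, and its primary index is named 'id':

from ByPlatform.StorageHelper.StorageHelper import StorageHelper

helper = StorageHelper(dbname='sensor_readings')
ok, msg = helper.StartStorageServce()
if ok:
    helper.InsertData({'device': 'dev-01', 'value': 3.14})
    db = helper.GetDBHandle()
    for rec in db.all('id'):  # iterate every record in the primary index
        print(rec)
    helper.StopStorageServce()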
+{"seq_id":"15232418836","text":"import config\nimport utils\nimport numpy as np\nfrom scipy.stats import invgamma, truncnorm\nimport healpy as hp\nimport time\nfrom default_gibbs import sample_cls\n\n\nclass GibbsSampler():\n def __init__(self, pix_map, noise, beam_fwhm_deg, nside, lmax, Npix, polarization = False, bins=None, n_iter = 10000,\n gibbs_cr = False, rj_step = False):\n self.noise = noise\n self.beam = beam_fwhm_deg\n self.nside = nside\n self.lmax = lmax\n self.polarization = polarization\n self.bins = bins\n self.pix_map = pix_map\n self.Npix = Npix\n self.bl_map = self.compute_bl_map(beam_fwhm_deg)\n self.constrained_sampler = None\n self.cls_sampler = None\n self.n_iter = n_iter\n self.gibbs_cr = gibbs_cr\n self.rj_step = rj_step\n if bins is None:\n if not polarization:\n self.bins = np.array([l for l in range(lmax+2)])\n else:\n bins = np.array([l for l in range(2, lmax + 1)])\n self.bins = {\"TT\":bins, \"EE\":bins, \"TE\":bins, \"BB\":bins}\n else:\n self.bins = bins\n\n self.dls_to_cls_array = np.array([2*np.pi/(l*(l+1)) if l !=0 else 0 for l in range(lmax+1)])\n\n def dls_to_cls(self, dls_):\n return dls_[:]*self.dls_to_cls_array\n\n def compute_bl_map(self, beam_fwhm):\n fwhm_radians = (np.pi / 180) * beam_fwhm\n bl_gauss = hp.gauss_beam(fwhm=fwhm_radians, lmax=self.lmax)\n bl_map = np.concatenate([bl_gauss,np.array([cl for m in range(1, self.lmax + 1) for cl in bl_gauss[m:] for _ in range(2)])])\n return bl_map\n\n def run_temperature(self, dls_init):\n h_accept_cr = []\n h_dls = []\n h_time_seconds = []\n binned_dls = dls_init\n dls = utils.unfold_bins(binned_dls, config.bins)\n cls = self.dls_to_cls(dls)\n var_cls_full = utils.generate_var_cl(dls)\n skymap, accept = self.constrained_sampler.sample(cls[:], var_cls_full.copy(), None, metropolis_step=False)\n h_dls.append(binned_dls)\n for i in range(self.n_iter):\n if i % 1 == 0:\n print(\"Default Gibbs\")\n print(i)\n\n start_time = time.process_time()\n skymap, accept = self.constrained_sampler.sample(cls[:], var_cls_full.copy(), skymap, metropolis_step=False,\n use_gibbs=False)\n binned_dls = self.cls_sampler.sample(skymap[:])\n dls = utils.unfold_bins(binned_dls, self.bins)\n cls = self.dls_to_cls(dls)\n var_cls_full = utils.generate_var_cl(dls)\n\n h_accept_cr.append(accept)\n end_time = time.process_time()\n h_dls.append(binned_dls)\n h_time_seconds.append(end_time - start_time)\n\n print(\"Acception rate constrained realization:\", np.mean(h_accept_cr))\n return np.array(h_dls), np.array(h_accept_cr), h_time_seconds\n\n def run_polarization(self, dls_init):\n h_accept_cr = []\n h_duration_cr = []\n h_duration_cls_sampling = []\n h_dls = {\"EE\":[], \"BB\":[]}\n binned_dls = dls_init\n dls_unbinned = {\"EE\":utils.unfold_bins(binned_dls[\"EE\"].copy(), self.bins[\"EE\"]), \"BB\":utils.unfold_bins(binned_dls[\"BB\"].copy(), self.bins[\"BB\"])}\n print(\"RJ_STEP ?:\", self.rj_step)\n if self.rj_step == True or self.gibbs_cr == True:\n skymap, accept = self.constrained_sampler.sample(dls_unbinned)\n\n h_dls[\"EE\"].append(binned_dls[\"EE\"])\n h_dls[\"BB\"].append(binned_dls[\"BB\"])\n for i in range(self.n_iter):\n if i % 1 == 0:\n print(\"Default Gibbs\")\n print(i)\n\n start_time = time.clock()\n #if self.rj_step == False:\n # skymap, accept = self.constrained_sampler.sample(dls_unbinned.copy())\n #else:\n # skymap, accept = self.constrained_sampler.sample(dls_unbinned.copy(), skymap)\n # h_accept_cr.append(accept)\n\n if self.rj_step is False and self.gibbs_cr is False:\n skymap, _ = 
self.constrained_sampler.sample(dls_unbinned.copy())\n else:\n skymap, accept = self.constrained_sampler.sample(dls_unbinned, skymap)\n h_accept_cr.append(accept)\n\n end_time = time.clock()\n duration = end_time - start_time\n h_duration_cr.append(duration)\n\n start_time = time.clock()\n binned_dls = self.cls_sampler.sample(skymap.copy())\n end_time = time.clock()\n duration =end_time - start_time\n h_duration_cls_sampling.append(duration)\n dls_unbinned = {\"EE\":utils.unfold_bins(binned_dls[\"EE\"].copy(), self.bins[\"EE\"]), \"BB\":utils.unfold_bins(binned_dls[\"BB\"].copy(), self.bins[\"BB\"])}\n \n h_dls[\"EE\"].append(binned_dls[\"EE\"])\n h_dls[\"BB\"].append(binned_dls[\"BB\"])\n\n if self.rj_step == True:\n print(\"Acception rate constrained realization:\", np.mean(h_accept_cr))\n\n h_dls[\"EE\"] = np.array(h_dls[\"EE\"])\n h_dls[\"BB\"] = np.array(h_dls[\"BB\"])\n return h_dls, np.array(h_accept_cr), np.array(h_duration_cr), np.array(h_duration_cls_sampling)\n\n\n def run(self, dls_init):\n if not self.polarization:\n return self.run_temperature(dls_init)\n else:\n return self.run_polarization(dls_init)\n\n\n\n\n\n\n\n\n","repo_name":"Gabriel-Ducrocq/GibbsSampler","sub_path":".ipynb_checkpoints/GibbsSampler-checkpoint.py","file_name":"GibbsSampler-checkpoint.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"35391093060","text":"import pandas as pd\n\ndf = pd.DataFrame(\n data = [{\"name\": \"남일우\", \"sex\": \"m\", \"department\": \"IT개발팀\"},\n {\"name\": \"이수미\", \"sex\": \"f\", \"department\": \"IT개발팀\"},\n {\"name\": \"정재현\", \"sex\": \"m\", \"department\": \"영상제작1팀\"},\n {\"name\": \"박수정\", \"sex\": \"f\", \"department\": \"고객서비스팀\"},\n {\"name\": \"이규봉\", \"sex\": \"m\", \"department\": \"트랜드패션팀\"},\n {\"name\": \"박문식\", \"sex\": \"m\", \"department\": \"DM팀\"},\n {\"name\": \"선수현\", \"sex\": \"f\", \"department\": \"디지털 엑셀러레이션팀\"},\n {\"name\": \"이승준\", \"sex\": \"m\", \"department\": \"COE\"},\n {\"name\": \"정은성\", \"sex\": \"m\", \"department\": \"microSVC\"}]\n , index=[0,1,2,3,4,5,6,7,8]\n)\n\ndf_2 = pd.DataFrame(\n data={\"name\": [\"남일우\", \"이수미\", \"정재현\", \"박수정\",\n \"이규봉\", \"박문식\", \"선수현\", \"이승준\", \"정은성\"],\n \"sex\": [\"m\", \"f\", \"m\", \"f\", \"m\", \"m\", \"f\", \"m\", \"m\"],\n \"department\": [\"IT개발팀\", \"IT개발팀\", \"영상제작1팀\",\n \"고객서비스팀\", \"트랜드패션팀\", \"DM팀\",\n \"디지털 엑셀러레이션팀\", \"COE\", \"microSVC\"]}\n # , index=[0,1,2,3,4,5,6,7,8]\n)\n\ndf_3 = pd.DataFrame(\n data=[[\"남일우\", \"m\", \"IT개발팀\"],\n [\"이수미\", \"f\", \"IT개발팀\"],\n [\"정재현\", \"m\", \"영상제작1팀\"],\n [\"박수정\", \"f\", \"고객서비스팀\"],\n [\"이규봉\", \"m\", \"트랜드패션팀\"],\n [\"박문식\", \"m\", \"DM팀\"],\n [\"선수현\", \"f\", \"디지털 엑셀러레이션팀\"],\n [\"이승준\", \"m\", \"COE\"],\n [\"정은성\", \"m\", \"microSVC\"]],\n columns=[\"name\", \"sex\", \"index\"],\n index=[0,1,2,3,4,5,6,7,8]\n)\n\nsquare = 0\nnumber = 1\n\nwhile square < 99:\n square = number ** 2\n print(square)\n number += 1\n\nwhile True:\n if square < 99:\n break\n square = number ** 2\n print(square)\n number += 1\n\n\ndef sum_two_numbers(a, b):\n val = a + b\n return val\n\n# c = sum_two_numbers(3, 12)\n# 실제로 벌어지는 일\n(a, b) = (3, 12)\nval = a + b\nc = val\n\nany_list = [12, -32, 231, 0, -325, 125]\n\n\ntype(df.index)\ntype(df.columns)\n# name, sex, department,\n# 남일우, m, IT개발팀,\n# 이수미, f, IT개발팀,\n# 정재현, m, 영상제작1팀,\n# 박수정, f, 고객서비스팀,\n# 이규봉, m, 트랜드패션팀,\n# 박문식, m, DM팀,\n# 선수현, f, 디지털 엑셀러레이션팀,\n# 이승준, m, COE,\n# 정은성, m, microSVC\n#\n#\n# {\"name\": \"남일우\", \"sex\": \"m\", \"department\": \"IT개발팀\"}\n# {\"name\": \"이수미\", \"sex\": \"f\", \"department\": \"IT개발팀\"}\n# {\"name\": \"정재현\", \"sex\": \"m\", \"department\": \"영상제작1팀\"}\n# {\"name\": \"박수정\", \"sex\": \"f\", \"department\": \"고객서비스팀\"}\n# {\"name\": \"이규봉\", \"sex\": \"m\", \"department\": \"트랜드패션팀\"}\n# {\"name\": \"박문식\", \"sex\": \"m\", \"department\": \"DM팀\"}\n# {\"name\": \"선수현\", \"sex\": \"f\", \"department\": \"디지털 엑셀러레이션팀\"}\n# {\"name\": \"이승준\", \"sex\": \"m\", \"department\": \"COE\"}\n# {\"name\": \"정은성\", \"sex\": \"m\", \"department\": \"microSVC\"}\n\n# df\n# df['name']\n# df['department']\n# df['sex']\n#\n# df.index\n# df.columns\n#\n# df.loc[[2, 4, 5, 7], ['name', 'sex']]\n# df.iloc[[2, 4, 5, 7], [0, 2]]\n#\n# df.loc[:, 'name']\n# df.iloc[:, 0]\n#\n# df.loc[8], df.loc[8. 
:]\n# df.iloc[8], df.iloc[8, :]\n#\n#\n# # 특정한 컬럼에서 some_value을 가진 값만 뽑아내기\n# df.loc[df['column_name'] == some_value]\n# df.loc[df['column_name'] != some_value]\n#\n# # 특정한 컬럼에서 some_values(복수)를 가진 값만 뽑아내기\n# df.loc[df['column_name'].isin(some_values)]\n# df.loc[~df['column_name'].isin(some_values)]\n#\n# # 한번에 여러 개의 조건을 적용시켜 뽑아내기\n# df.loc[(df['column_name'] == some_value) & df['other_column'].isin(some_values)]\n# df.loc[df['column_name'] == some_value].loc[df['other_column'].isin(some_values)]\n#\n#\n# # isin returns a boolean Series, so to select rows whose value is not in some_values, negate the boolean Series using ~:\n#\n#\n#\n# df.loc[df['sex'] == 'm']\n# df.loc[df['sex'] != 'f']\n# df.iloc[:, 0]\n# df.loc[df['sex'] == 'm', :]\n#\n#\n# df.loc[df['department'].isin(['IT개발팀', 'DM팀', 'COE', 'microSVC']), ['department', 'name']]\n# df.loc[~df['department'].isin(['영상제작1팀', '고객서비스팀', '트랜드패션팀', '디지털 엑셀러레이션팀']), ['department', 'name']]\n#\n# df.loc[df['department'].isin(['IT개발팀', '고객서비스팀']) & (df['sex'] == 'f'), ['name']]\n# df.loc[df['department'].isin(['IT개발팀', '고객서비스팀'])].loc[df['sex'] == 'f'][['name']]\n#\n# 'column_name'\n# [3]\n\ncol_dict = {\"name\": \"이름\", \"department\": \"부서\", \"untitled\" : \"무제\"}\nindex_dict = {0: \"zero\", 4: \"four\", 10: \"ten\"}\ndf = df.rename(col_dict, axis=1)\ndf = df.rename(index_dict, axis=0)\n\nstring = ' xoxo love xoxo '\n\n# Leading whitepsace are removed\nprint(string.strip())\n\n\ndf['new'] = df['col'].apply(lambda x: any_func(x))\n\nnew_list = []\nfor x in any_list:\n new_list.append(x)\n\ndf['new_col'] = df['col_name'].apply(lambda x: x)\n\ndf['new'] = df['col'].apply(lambda x: any_func(x))\n\ndef any_func(x):\n x = ...\n return x\n\ndf = df.apply(lambda x: any_func(x), axis=1)\n\n\ndf = pd.DataFrame(data=['2017.03.27', '2016.08.06', '2016.05.08',\n '2018.06.02', '2017.08.22', '2015.05.26',\n '2017.08.25', '2015.11.08', '2017.11.22',\n '2018.10.17'], columns=['date'])\n\n\ndef parse_date(row):\n the_date = row.date\n row['year'] = int(the_date[:4])\n row['month'] = int(the_date[5:7])\n row['day'] = int(the_date[-2:])\n return row\n\n\ndf = df.apply(lambda row: parse_date(row), axis=1)\n","repo_name":"joonable/playground","sub_path":"etc/20181015.py","file_name":"20181015.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"25"}
+{"seq_id":"16929652552","text":"\"\"\"Constants for the program\"\"\"\n\nHIGHEST_TEMPERATURE = 'highest_temperature'\nLOWEST_TEMPERATURE = 'lowest_temperature'\nMAXIMUM_HUMIDITY = 'maximum_humidity'\nHIGHEST_TEMPERATURE_MEAN = 'highest_temperature_mean'\nLOWEST_TEMPERATURE_MEAN = 'lowest_temperature_mean'\nMAXIMUM_HUMIDITY_MEAN = 'maximum_humidity_mean'\nDATE = 'date'\nVALUE = 'value'\nSUM = 'sum'\nCOUNT = 'count'\nMAX_TEMPERATURE = 'Max TemperatureC'\nMIN_TEMPERATURE = 'Min TemperatureC'\nMEAN_HUMIDITY = ' Mean Humidity'\nPKT = 'PKT'\nPKST = 'PKST'\nDATE_OF_THE_DAY = 'date_of_the_day'\n","repo_name":"Talha-Rizwan/weatherman-python","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"7018457941","text":"import fileinput\n\nweighted_mean = lambda values, weights: sum([v*w for (v, w) in zip(values, weights)])/sum(weights)\n\nfor counter, line in enumerate(fileinput.input()):\n if counter == 0:\n continue\n \n if counter == 1:\n values = list(map(int, line.split(\" \")))\n \n if counter == 2:\n weights = list(map(int, line.split(\" \")))\n \n w_mean = weighted_mean(values, weights)\n print(f'{w_mean:.1f}')\n","repo_name":"treyceraso/hackerrank","sub_path":"Day 0: Weighted Mean.py","file_name":"Day 0: Weighted Mean.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"24607332194","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"colorp\",\n version=\"0.0.1\",\n author=\"Siddharth Gupta\",\n author_email=\"g.sidd97@gmail.com\",\n description=\"A small package for printing colored strings\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)","repo_name":"siddg97/colorP","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"}
+{"seq_id":"22412346085","text":"#python3.7\r\n\r\np = int(input())\r\nc=0\r\n\r\nfor i in range(2,int(p/2)+1):\r\n if(p%i==0):\r\n print(\"NotPrime\")\r\n c=1\r\n break\r\n\r\nif(c==0):\r\n print(\"Prime\")\r\n \r\n","repo_name":"Ritwik-Gupta/Python-Code-Files","sub_path":"DSA Python/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"10665960746","text":"# This script transforms Lang-8 data from Json format into XML format\r\n# Python version: 3.6.5\r\n# Usage example: python lang8_preprocess.py --dataset lang-8-20111007-L1-v2.dat --language English --id en --output lang-8-20111007-L1-v2.xml\r\n\r\nimport re,json\r\nimport langid\r\nimport argparse\r\nimport xml.etree.ElementTree as ET\r\nfrom clean_data import clean_sentence\r\n\r\nparser=argparse.ArgumentParser()\r\nparser.add_argument('--dataset', help='raw lang8 dataset (json format)')\r\nparser.add_argument('--language', help='the language that need be contained by the argument of learning language')\r\nparser.add_argument('--id', help='the langid of this language')\r\nparser.add_argument('--output', help='processed lang8 dataset (XML format)')\r\nargs=parser.parse_args()\r\n\r\n# transform re to RegexObject\r\nSLINE=re.compile(\"\\[sline\\].*?\\[\\\\\\/sline\\]\")\r\nSLINE_END=re.compile(\"\\[\\\\\\/sline\\]\")\r\nFTAG=re.compile(\"\\[f-[a-zA-Z]*\\]|\\[\\\\\\/f-[a-zA-Z]*\\]\")\r\nBACKSLASH=re.compile(r'\\\\(.)')\r\n\r\ndef remove_tags(line):\r\n line.strip()\r\n for tag in SLINE,SLINE_END,FTAG:\r\n line=tag.sub('',line)\r\n return re.sub('\\s+',' ',line)\r\n\r\ndef process(line,dataset,essay_id):\r\n changes=0\r\n row = json.loads(re.sub(r'[\\x00-\\x1F]+', '', line))\r\n if args.language != row[2]:\r\n return False, 0\r\n map=[]\r\n num=0\r\n match=0\r\n correction = False\r\n for i in range(len(row[4])):\r\n row[4][i] = re.sub('\\s+',' ', row[4][i].strip())\r\n row[4][i] = clean_sentence(row[4][i])\r\n if len(row[4][i]):\r\n num+=1\r\n s_language, _ = langid.classify(row[4][i])\r\n if s_language==args.id:\r\n match+=1\r\n if correction == False:\r\n for each in row[5][i]:\r\n if each:\r\n each = re.sub('\\s+', ' ', each.strip())\r\n each = clean_sentence(each)\r\n if len(each):\r\n t_language, _ = langid.classify(each)\r\n if t_language == args.id and row[4][i] != each:\r\n correction = True\r\n break\r\n map.append(s_language)\r\n else:\r\n map.append('null')\r\n if match < 2 or correction == False:\r\n return False, 0\r\n essay=ET.SubElement(dataset, 'essay', attrib={'id': str(essay_id), 'journal_id':row[0], 'user_id':row[1], 'learning_language':row[2], 'native_language':row[3]})\r\n sentence_id = 0\r\n for i in range(len(row[4])):\r\n if len(row[4][i]):\r\n sentence=ET.SubElement(essay,'sentence', attrib={'id':str(sentence_id)})\r\n source=ET.SubElement(sentence,'source')\r\n source.text=row[4][i]\r\n source.set(\"langid\",map[i])\r\n for each in row[5][i]:\r\n if each:\r\n each = re.sub('\\s+',' ', each.strip())\r\n each = clean_sentence(each)\r\n if len(each):\r\n target=ET.SubElement(sentence,'target')\r\n target.text=each\r\n t_language, _ = langid.classify(target.text)\r\n target.set(\"langid\",t_language)\r\n if t_language==args.id and source.text!=target.text:\r\n changes+=1\r\n sentence_id += 1\r\n return True, changes\r\n\r\ndef indent(elem, level=0):\r\n i = \"\\n\" + level*\" \"\r\n if len(elem):\r\n if not elem.text or not elem.text.strip():\r\n elem.text = i + \" \"\r\n if not elem.tail or not elem.tail.strip():\r\n elem.tail = i\r\n for elem in elem:\r\n indent(elem, level+1)\r\n if not elem.tail or not elem.tail.strip():\r\n elem.tail = i\r\n else:\r\n if level and (not elem.tail or not elem.tail.strip()):\r\n elem.tail = i\r\n\r\nessay_id=0\r\ndataset=ET.Element('dataset',attrib={'name':'lang-8-20111007-L1-v2'})\r\nwith open(args.dataset, encoding='utf-8') as f:\r\n size=0\r\n for line in f:\r\n line=remove_tags(line)\r\n judge, 
changes=process(line,dataset,essay_id)\r\n if judge==True:\r\n essay_id+=1\r\n size+=changes\r\n indent(dataset)\r\n tree = ET.ElementTree(dataset)\r\n tree.write(args.output, encoding='UTF-8', xml_declaration=True)\r\n print(essay_id, \"essays have been added.\")\r\n print(size, \"sentence pairs can be used.\")","repo_name":"nusnlp/crosentgec","sub_path":"scripts/lang8_preprocess.py","file_name":"lang8_preprocess.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"25"}
+{"seq_id":"42630114465","text":"# coding=utf-8\nfrom escpos import *\nusb = printer.Usb(0x0416, 0x5011, 0, out_ep=0x01)\n\nusb._raw('\\x1b\\x40')\nusb.set(font='a', type='b', align='center', width=2, height=2)\nusb.text(u\"LesPark 拉拉公园\\n\\n\".encode('gbk'))\nusb.set()\nusb.text(u\"陪领导打麻将,领导无意中说:我最欣赏蒋介石,宋美龄喜欢梧桐,蒋介石就在南京种满了梧桐。我心领神会,打出一张五筒,领导:胡。\\n\\n\".encode(\"gbk\"))\nusb.image('logo.png')\nusb.qr('http://ad.lespark.us/?m=download')\nusb._raw('\\x0a')\n# usb.set(codepage=None, align='center')#设置页面居中\n\n# usb.cut()#切纸\n","repo_name":"mastergyp/printer","sub_path":"printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"25"}
+{"seq_id":"12057997999","text":"import numpy\r\nimport urllib\r\nimport scipy.optimize\r\nimport random\r\nfrom urllib import request\r\n\r\ndef parseData(fname):\r\n for l in urllib.request.urlopen(fname):\r\n yield eval(l)\r\n\r\nprint (\"Reading data...\")\r\ndata = list(parseData(\"http://jmcauley.ucsd.edu/cse255/data/beer/beer_50000.json\"))\r\nprint (\"done\")\r\n\r\nX = []\r\nfor d in data:\r\n if d['beer/style'] == 'American IPA':\r\n X.append([1, 1])\r\n else:\r\n X.append([1, 0])\r\n\r\ny = [d['review/taste'] for d in data]\r\nX1 = X[:int(len(X)/2)]\r\nX2 = X[int(len(X)/2):]\r\ny1 = y[:int(len(y)/2)]\r\ny2 = y[int(len(y)/2):]\r\ntheta,residuals,rank,s = numpy.linalg.lstsq(X1, y1)\r\nprint ('MSE of training part is: ', residuals[0]/len(y1))\r\n\r\nX2 = numpy.matrix(X2)\r\ny2 = numpy.matrix(y2)\r\n# array1 = numpy.subtract(numpy.dot(theta, X2.T),y2)\r\n# array2 = numpy.array(array1)**2\r\n# res = numpy.sum(array2)\r\n# res /= int(len(X)/2)\r\n# print ('MSE of testing part is: ', res)\r\nA = numpy.dot(theta, X2.T)\r\nmse = numpy.square(A - y2).mean()\r\nprint ('MSE of testing part is: ', mse)\r\n","repo_name":"JiaqiHe/Web-Mining-and-Recommender-System","sub_path":"hw1/hw1_3.py","file_name":"hw1_3.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"40380825078","text":"\"\"\"\nCodeforces Educational Round\n\nProblem 598 A Tricky Sum\n\n@author yamaton\n@date 2015-11-12\n\"\"\"\n\nimport itertools as it\nimport functools\nimport operator\nimport collections\nimport math\nimport sys\n\n\ndef solve(x):\n simple_sum = x * (x + 1) // 2\n\n pow = -1\n while x > 0:\n x >>= 1\n pow += 1\n\n sum_power_of_twos = 2**(pow + 1) - 1\n return simple_sum - 2 * sum_power_of_twos\n\n\ndef p(*args, **kwargs):\n return print(*args, file=sys.stderr, **kwargs)\n\ndef main():\n t = int(input())\n xs = [int(input()) for _ in range(t)]\n\n results = [solve(x) for x in xs]\n for res in results:\n print(res)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yamaton/codeforces","sub_path":"problemSet/598A_Tricky_Sum.py","file_name":"598A_Tricky_Sum.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"13652775258","text":"import time\n\nimport requests\nimport numpy as np\nimport pyaudio\nimport threading\nimport websocket\n\nCHUNK = 1024\nCHANNELS = 1\nRATE = 16000\n\n\n# 录音类 监听声卡\nclass Record:\n def __init__(self):\n self.CHUNK = CHUNK\n self.FORMAT = pyaudio.paInt16\n self.CHANNELS = CHANNELS\n self.RATE = RATE\n self._running = True\n self._frames = []\n\n # 获取内录设备序号,在windows操作系统上测试通过,hostAPI = 0 表明是MME设备\n def findInternalRecordingDevice(self, p):\n # 要找查的设备名称中的关键字\n target = '立体声混音'\n # 逐一查找声音设备\n for i in range(p.get_device_count()):\n devInfo = p.get_device_info_by_index(i)\n if devInfo['name'].find(target) >= 0 and devInfo['hostApi'] == 0:\n # print('已找到内录设备,序号是 ',i)\n return i\n print('无法找到内录设备!')\n return -1\n\n # 开始录音,开启一个新线程进行录音操作\n def start(self):\n threading._start_new_thread(self.__record, ())\n\n # 执行录音的线程函数\n def __record(self):\n self._running = True\n self._frames = []\n\n p = pyaudio.PyAudio()\n # 查找内录设备\n dev_idx = self.findInternalRecordingDevice(p)\n if dev_idx < 0:\n return\n # 在打开输入流时指定输入设备\n stream = p.open(input_device_index=dev_idx,\n format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK)\n # 循环读取输入流\n while self._running:\n data = stream.read(self.CHUNK)\n self._frames.append(data)\n\n def __recognition(self, wave_data):\n url = 'http://127.0.0.1:20000/'\n token = 'qwertasd'\n data = {'token': token, 'fs': RATE, 'wavs': wave_data}\n r = requests.post(url, data)\n\n r.encoding = 'utf-8'\n\n return r.text\n\n # 识别接口 实时识别声卡录制的声音\n\n\ndef recognition(wave_data):\n url = 'http://127.0.0.1:20000/'\n token = 'qwertasd'\n data = {'token': token, 'fs': RATE, 'wavs': wave_data}\n r = requests.post(url, data)\n\n r.encoding = 'utf-8'\n\n return r.text\n\n\ndef read_buffer_data(buff_data):\n wave_data = np.frombuffer(buff_data, dtype=np.short) # 将声音文件数据转换为数组矩阵形式\n wave_data.shape = -1, CHANNELS # 按照声道数将数组整形,单声道时候是一列数组,双声道时候是两列的矩阵\n wave_data = wave_data.T # 将矩阵转置\n # wave_data = wave_data\n return wave_data\n\n\ndef main():\n record = Record()\n record.start()\n running = True\n while running:\n time.sleep(2)\n wave_data = read_buffer_data(b''.join(record._frames))\n record._frames = []\n txt = recognition(wave_data)\n print(txt, end='')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chendayin/ASRT_v0.6.1","sub_path":"auto_speech_realTime.py","file_name":"auto_speech_realTime.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"19668411499","text":"#!/usr/bin/env python\n# coding: utf8\nfrom __future__ import absolute_import\nfrom types import ListType, DictType\n\nfrom jmessage.core import conf\nfrom jmessage.core.base import BaseSDK\nfrom jmessage.core.exceptions import ParamsError\n\n\nclass Groups(BaseSDK):\n \"\"\"群组管理API\n\n :param appKey: string, 应用的AppKey\n :param masterSecret: string, 相应的masterSecret\n \"\"\"\n\n def __init__(self, appKey, masterSecret):\n super(Groups, self).__init__(appKey, masterSecret)\n\n def get_user_groups(self, username):\n \"\"\"根据用户名获取群组列表\n\n :param username: string, 用户名\n \"\"\"\n self._check_username(username)\n\n method = 'GET'\n url = self._make_url(conf.GET_USER_GROUPS, username=username)\n resp = self._common_process(method, url)\n content = self._handle_content_response(resp)\n return content\n\n def get_group_info(self, group_id):\n \"\"\"根据群组ID获取群组信息\n\n :param group_id: int, 群组ID\n \"\"\"\n method = 'GET'\n url = self._make_url(conf.GET_GROUP_INFO, gid=group_id)\n resp = self._common_process(method, url)\n content = self._handle_content_response(resp)\n return content\n\n def get_group_members(self, group_id):\n \"\"\"根据群组ID获取群组成员\n\n :param group_id: int, 群组ID\n \"\"\"\n method = 'GET'\n url = self._make_url(conf.GET_GROUP_MEMBER_LIST, gid=group_id)\n resp = self._common_process(method, url)\n content = self._handle_content_response(resp)\n return content\n\n def get_app_groups(self, start=0, count=100):\n \"\"\"获取应用下的群组列表\n\n :param start: int, 起始位置,默认从第一个开始,即0\n :param count: int, 获取数量, 默认获取100个\n \"\"\"\n method = 'GET'\n url = self._make_url(\n conf.GET_APP_GROUPS,\n start=start,\n count=count\n )\n resp = self._common_process(method, url)\n content = self._handle_content_response(resp)\n return content\n\n def create_group(self, owner_username, group_name,\n members_username=[], group_desc=\"\"):\n \"\"\"创建群组\n\n :param owner_username: string, 群组创始人\n :param group_name: string, 群组名称\n :param members_username: list, 初始成员列表, 默认为空\n :param group_desc: string, 群组简介,默认为空\n \"\"\"\n method = 'POST'\n url = self._make_url(conf.CREATE_GROUP)\n params = {\n \"owner_username\": owner_username,\n \"group_name\": group_name,\n \"members_username\": members_username,\n \"group_desc\": group_desc\n }\n\n resp = self._common_process(method, url, params=params)\n content = self._handle_content_response(resp)\n return content\n\n def update_group_members(self, group_id, op_dict):\n \"\"\"添加或删除群组成员\n\n :param group_id: int, 群组ID\n :param op_dict: dict, 成员操作信息,\n key为`add`或`remove`, value为用户名列表\n \"\"\"\n if not isinstance(op_dict, DictType):\n raise ParamsError(\"You should pass dict type parameters\\\n when update group members.\")\n\n for k, v in op_dict.items():\n if k not in ('add', 'remove'):\n raise ParamsError(\"action must be `add` or `remove`\\\n when update group members.\")\n if not isinstance(v, ListType):\n raise ParamsError(\"usernames must be grouped to a list\\\n when update group members.\")\n\n method = 'POST'\n url = self._make_url(conf.UPDATE_GROUP_MEMBERS, gid=group_id)\n resp = self._common_process(method, url, params=op_dict)\n status = self._handle_status_response(resp)\n return status\n\n def del_group(self, group_id):\n \"\"\"根据群组ID删除群组\n\n :param group_id: int, 群组ID\n \"\"\"\n method = 'DELETE'\n url = self._make_url(conf.DEL_GROUP, gid=group_id)\n resp = self._common_process(method, url)\n status = self._handle_status_response(resp)\n return status\n\n def update_group_info(self, group_id, group_name=None, group_desc=None):\n 
\"\"\"更新群组信息\n\n :param group_id: int, 群组ID\n :param group_name: string, 群组名称,可选\n :param group_desc: string, 群组简介,可选\n\n group_name和group_desc至少要设置一个\n \"\"\"\n method = 'PUT'\n url = self._make_url(conf.UPDATE_GROUP_INFO, gid=group_id)\n\n params = {}\n if group_name is not None:\n params.update({'group_name': group_name})\n\n if group_desc is not None:\n params.update({'group_desc': group_desc})\n\n if not params:\n raise ParamsError(\"At least specify one of `group_name`\\\n and `group_desc`.\")\n\n resp = self._common_process(method, url, params=params)\n status = self._handle_status_response(resp)\n return status\n","repo_name":"readthecodes/JMessage-Python-SDK","sub_path":"jmessage/core/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"}
+{"seq_id":"15565335756","text":"#set: duplikáció mentes, iterálható objektum\n\nmy_set = set()\nmy_set = {1, 2, 3, 4, \"Ricsi\", \"Ricsi\"}\n\n# print(type(my_set))\n\n# print(my_set)\n\n# {'Jolán' : [1, 4, 5, 6],\n# 'Ibolya' : [1, 8, 9],\n# 'Jácint': [10, 22, 4],\n# 'Karcsi': [5, 11, 22],\n# # 'Pista': [8, 1, 4, 40]\n# }\n\nmy_set = {1, 4, 5, 6}\nmy_set = {8, 9}\nmy_set2 = {1, 8, 9}\nmy_set3 = {5, 6, 7}\n# sethez hozzáadok\n# my_set.add(3)\n\n# # setből törlök\n# my_set.pop()\n# my_set.remove(4)\n\ntemp = my_set.difference(my_set2, my_set3)\n\nprint(temp)\n\ntemp = my_set.intersection(my_set2)\n\nprint(temp)\n\ntemp = my_set.issubset(my_set2)\n\nprint(temp)\n\n","repo_name":"devloulou/2022_oktober_python","sub_path":"7. alkalom/python_set.py","file_name":"python_set.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"1806432297","text":"import os\nimport sys\nimport time\nimport pyupbit\nfrom PyQt5.QtCore import QThread\nfrom pyupbit import WebSocketManager\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom utility.static import now\nfrom utility.setting import ui_num, DICT_SET\n\n\nclass WebsTicker(QThread):\n def __init__(self, qlist):\n \"\"\"\n 0 1 2 3 4 5 6 7 8 9 10\n qlist = [windowQ, soundQ, query1Q, query2Q, teleQ, sreceivQ, creceivQ, stockQ, coinQ, sstgQ, cstgQ,\n tick1Q, tick2Q, tick3Q, tick4Q, tick5Q]\n 11 12 13 14 15\n \"\"\"\n super().__init__()\n self.windowQ = qlist[0]\n self.creceivQ = qlist[6]\n self.coinQ = qlist[8]\n self.cstgQ = qlist[10]\n self.tick5Q = qlist[15]\n self.list_jang = []\n self.websQ_ticker = None\n\n def run(self):\n \"\"\" get_tickers 리턴 리스트의 갯수가 다른 버그 발견, 1초 간격 3회 조회 후 길이가 긴 리스트를 티커리스트로 정한다 \"\"\"\n codes = pyupbit.get_tickers(fiat=\"KRW\")\n time.sleep(1)\n codes2 = pyupbit.get_tickers(fiat=\"KRW\")\n codes = codes2 if len(codes2) > len(codes) else codes\n time.sleep(1)\n codes2 = pyupbit.get_tickers(fiat=\"KRW\")\n codes = codes2 if len(codes2) > len(codes) else codes\n dict_tsbc = {}\n self.websQ_ticker = WebSocketManager('ticker', codes)\n while True:\n if not self.creceivQ.empty():\n data = self.creceivQ.get()\n self.UpdateJango(data)\n\n data = self.websQ_ticker.get()\n if data == 'ConnectionClosedError':\n self.windowQ.put([ui_num['C단순텍스트'], '시스템 명령 오류 알림 - WebsTicker 연결 끊김으로 다시 연결합니다.'])\n self.websQ_ticker = WebSocketManager('ticker', codes)\n else:\n code = data['code']\n t = data['trade_time']\n v = data['trade_volume']\n gubun = data['ask_bid']\n try:\n pret = dict_tsbc[code][0]\n bids = dict_tsbc[code][1]\n asks = dict_tsbc[code][2]\n except KeyError:\n pret = None\n bids = 0\n asks = 0\n if gubun == 'BID':\n dict_tsbc[code] = [t, bids + float(v), asks]\n else:\n dict_tsbc[code] = [t, bids, asks + float(v)]\n if t != pret:\n c = data['trade_price']\n o = data['opening_price']\n h = data['high_price']\n low = data['low_price']\n per = round(data['signed_change_rate'] * 100, 2)\n dm = data['acc_trade_price']\n bids = dict_tsbc[code][1]\n asks = dict_tsbc[code][2]\n tbids = data['acc_bid_volume']\n tasks = data['acc_ask_volume']\n dt = data['trade_date'] + t\n dict_tsbc[code] = [t, 0, 0]\n data = [code, c, o, h, low, per, dm, bids, asks, tbids, tasks, dt, now()]\n self.tick5Q.put(data)\n if DICT_SET['업비트트레이더']:\n injango = code in self.list_jang\n self.cstgQ.put(data + [injango])\n if injango:\n self.coinQ.put([code, c])\n\n def UpdateJango(self, data):\n if data[0] == '잔고편입':\n if data[1] not in self.list_jang:\n self.list_jang.append(data[1])\n elif data[0] == '잔고청산':\n if data[1] in self.list_jang:\n self.list_jang.remove(data[1])\n\n\nclass WebsOrderbook(QThread):\n def __init__(self, qlist):\n \"\"\"\n 0 1 2 3 4 5 6 7 8 9 10\n qlist = [windowQ, soundQ, query1Q, query2Q, teleQ, sreceivQ, creceivQ, stockQ, coinQ, sstgQ, cstgQ,\n tick1Q, tick2Q, tick3Q, tick4Q, tick5Q]\n 11 12 13 14 15\n \"\"\"\n super().__init__()\n self.windowQ = qlist[0]\n self.coinQ = qlist[8]\n self.cstgQ = qlist[10]\n self.tick5Q = qlist[15]\n self.websQ_order = None\n\n def run(self):\n \"\"\" get_tickers 리턴 리스트의 갯수가 다른 버그 발견, 1초 간격 3회 조회 후 길이가 긴 리스트를 티커리스트로 정한다 \"\"\"\n codes = pyupbit.get_tickers(fiat=\"KRW\")\n time.sleep(1)\n codes2 = pyupbit.get_tickers(fiat=\"KRW\")\n codes = codes2 if len(codes2) > len(codes) else codes\n time.sleep(1)\n codes2 = pyupbit.get_tickers(fiat=\"KRW\")\n codes = codes2 if len(codes2) > len(codes) else 
codes\n self.cstgQ.put(['관심종목', codes])\n self.websQ_order = WebSocketManager('orderbook', codes)\n while True:\n data = self.websQ_order.get()\n if data == 'ConnectionClosedError':\n self.windowQ.put([ui_num['C단순텍스트'], '시스템 명령 오류 알림 - WebsOrderbook 연결 끊김으로 다시 연결합니다.'])\n self.websQ_order = WebSocketManager('orderbook', codes)\n else:\n code = data['code']\n tsjr = data['total_ask_size']\n tbjr = data['total_bid_size']\n s5hg = data['orderbook_units'][4]['ask_price']\n s4hg = data['orderbook_units'][3]['ask_price']\n s3hg = data['orderbook_units'][2]['ask_price']\n s2hg = data['orderbook_units'][1]['ask_price']\n s1hg = data['orderbook_units'][0]['ask_price']\n b1hg = data['orderbook_units'][0]['bid_price']\n b2hg = data['orderbook_units'][1]['bid_price']\n b3hg = data['orderbook_units'][2]['bid_price']\n b4hg = data['orderbook_units'][3]['bid_price']\n b5hg = data['orderbook_units'][4]['bid_price']\n s5jr = data['orderbook_units'][4]['ask_size']\n s4jr = data['orderbook_units'][3]['ask_size']\n s3jr = data['orderbook_units'][2]['ask_size']\n s2jr = data['orderbook_units'][1]['ask_size']\n s1jr = data['orderbook_units'][0]['ask_size']\n b1jr = data['orderbook_units'][0]['bid_size']\n b2jr = data['orderbook_units'][1]['bid_size']\n b3jr = data['orderbook_units'][2]['bid_size']\n b4jr = data['orderbook_units'][3]['bid_size']\n b5jr = data['orderbook_units'][4]['bid_size']\n data = [code, tsjr, tbjr,\n s5hg, s4hg, s3hg, s2hg, s1hg, b1hg, b2hg, b3hg, b4hg, b5hg,\n s5jr, s4jr, s3jr, s2jr, s1jr, b1jr, b2jr, b3jr, b4jr, b5jr]\n self.tick5Q.put(data)\n if DICT_SET['업비트트레이더']:\n self.cstgQ.put(data)\n","repo_name":"bsstory/PyStockTrader","sub_path":"coin/receiver_upbit.py","file_name":"receiver_upbit.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"25"}
+{"seq_id":"14020562814","text":"import numpy\nfrom numpy import lexsort, array, hstack\nfrom scipy.spatial.distance import cdist\nimport time\n\n### common math funcs\nfrom numpy import arccos, arcsin, arctan, arctan2, ceil, cos, cosh, exp, fabs, \\\n floor, fmod, hypot, ldexp, log, log10, modf, power, sin, sinh, sqrt, tan, tanh, \\\n maximum, minimum, e, pi\n \n\n\ndef dist_dep_conn_3d_3d(pop3d1, pop3d2, conn_prob = 1.0, \n min_delay = 1.0, max_delay = 20.0,\n max_dist = 3., weight=1.0,\n dist_rule = \"1.0/d\", new_format=False):\n \n d = cdist(pop3d1, pop3d2, 'euclidean')\n\n d_results = eval(dist_rule)\n\n max_dist = eval(dist_rule, globals(), {'d': max_dist})\n \n #~ print \"min\", d_results.min()\n #~ print \"max\", d_results.max()\n #~ print \"max_dist\", max_dist\n \n smaller_dists = d_results > max_dist # exponential!\n \n #~ print \"sum of smaller \", numpy.sum(smaller_dists)\n #~ print \"num conn \", \n if numpy.sum(smaller_dists) == 0:\n print(\"dist dep conn, no connections!\")\n return []\n\n numpy.random.seed(int(time.time()))\n \n d -= numpy.abs(numpy.min(d))\n d /= numpy.max(d)/(max_delay - min_delay)\n d += min_delay\n\n d *= smaller_dists\n \n #numpy.random.normal(loc=max_dist, size=d.shape)\n conn_list = []\n row_count = 0\n w = 0\n delay = 0\n for row in d:\n col_count = 0\n for dist in row:\n if (pop3d1 is pop3d2) and (col_count == row_count):\n col_count += 1\n continue\n \n if dist > 0 and numpy.random.random() <= conn_prob:\n w = weight*numpy.random.random()\n delay = maximum(0., dist + min_delay*(numpy.random.random() - 0.5))\n if new_format:\n conn_list.append((row_count, col_count, \n weight/dist, dist))\n else:\n conn_list.append([row_count, col_count, \n weight/dist, dist])\n\n col_count += 1\n row_count += 1\n #conn_list = numpy.array(conn_list)\n return conn_list\n\n","repo_name":"AlexBoro/polychronization","sub_path":"generate_connections.py","file_name":"generate_connections.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"71088471105","text":"import time\nimport calendar\nimport threading\nfrom datetime import datetime\nfrom logging import RootLogger\n\nfrom typeguard import typechecked\nfrom pydispatch import dispatcher\n\nfrom repository.SensorsRepository import SensorsRepository\nfrom model.Sensor import Sensor\nfrom model.SensorProperties import SensorProperties\nfrom event.SensorUpdateEvent import SensorUpdateEvent\nfrom communication.ZWaveDevice import ZWaveDevice\n\n\nclass SensorsPollingThread(threading.Thread):\n @typechecked()\n def __init__(self, polling_interval: int, sensors_repo: SensorsRepository, zwave_device: ZWaveDevice,\n logger: RootLogger):\n threading.Thread.__init__(self)\n self.__polling_interval = polling_interval\n self.__sensors_repo = sensors_repo\n self.__zwave_device = zwave_device\n self.__logger = logger\n self.shutdown = False\n\n @typechecked()\n def run(self) -> None:\n while not self.shutdown:\n self.__update_sensors()\n time.sleep(self.__polling_interval)\n\n def __update_sensors(self):\n sensors = self.__sensors_repo.get_sensors()\n for sensor in sensors:\n if None is sensor.properties.get(SensorProperties.POLLING):\n continue\n if sensor.device_type == Sensor.DeviceType.ZWAVE.value:\n self.__update_zwave_sensor(sensor)\n\n def __update_zwave_sensor(self, sensor: Sensor):\n new_value = self.__zwave_device.get_sensor_value(sensor.id)\n self.__logger.info(\"New value for sensor with id {0} is {1}: \".format(sensor.id, new_value))\n if None is not new_value:\n sensor.value = round(new_value, 1)\n sensor.last_updated = calendar.timegm(datetime.now().timetuple())\n self.__sensors_repo.set_sensor(sensor)\n dispatcher.send(SensorUpdateEvent.NAME, event=SensorUpdateEvent(sensor))","repo_name":"danionescu0/home-automation","sub_path":"python-server/communication/SensorsPollingThread.py","file_name":"SensorsPollingThread.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"25"}
+{"seq_id":"28502161837","text":"### An example of a try/except routine to mitigate errors\n\nmydata = [\n\t{'name':'Karen','yearofbirth':1999},\n\t{'name':'Walter'},\n\t]\n\nfor item in mydata:\n\ttry:\n\t\titem['age'] = 2020 - item['yearofbirth']\n\t\tprint(item['age'])\n\texcept:\n\t\tprint('Missing data...')\n","repo_name":"uq-courtois/digitalanalytics","sub_path":"Week 3/try-except-errorhandling.py","file_name":"try-except-errorhandling.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"25"}
+{"seq_id":"74065330624","text":"\"\"\" This module contains routines for building systems, which can be used for SPR-KKR calculations\n- e.g. system with vacuum pseudoatoms, or 2D semiinfinite systems\n\"\"\"\n\nfrom ..ase.build import aperiodic_times, stack as _stack\nfrom .atoms_region import AtomsRegion\nfrom .sprkkr_atoms import SPRKKRAtoms\nimport math\nimport numpy as np\nfrom ase import Atoms\nfrom ase.build import surface\n\nfrom numbers import Real\nfrom typing import Union, List, Tuple, Optional,Dict\n\ndef semiinfinite_system(atoms:Atoms, repeat:Union[Tuple[Real,Real],Real], atoms2:Atoms=None,\n hkl:Optional[Tuple[Real]]=None, hkl2:Optional[Tuple[Real]]=None,\n axis:int=2):\n \"\"\" Build a semiinfinite system from one or two 3D periodic systems.\n\n If two systems are given, they have to have identical the first two\n lattice vectors. If one system is given, the second system is created\n as a copy of the first system, but with vacuum (pseudo)atoms on its sites.\n\n Parameters\n ----------\n atoms\n The left bulk region of the result.\n\n repeat\n How many times should be the outer regions repeated in the central (non-bulk)\n region. If only one number is given, it is considered as the number of repeating\n of the left bulk region in the central region. The number of repeating of the right\n region then will be determined such that it will have the same integer part as the of\n the given number and the decimal part will be complement (to one).\n\n atoms2\n The right bulk region of the result. If it is None,\n it is created from the left one by replacing the atoms for vacuum pseudoatoms\n\n hkl\n If not None, rotate the left atoms according the given Miller coordinates, first.\n\n hkl2\n If not None, rotate the right atoms according the given Miller coordinates, first.\n If it is None, and the atoms2 are None too, the hkl argument is used for rotating\n the atoms2 object.\n\n axis\n Along which axis build the system.\n \"\"\"\n\n if isinstance(repeat, (int, float)):\n repeat=(repeat, math.floor(repeat) + math.ceil(repeat) - repeat)\n\n if hkl is not None:\n atoms1 = surface(atoms, hkl, 1)\n else:\n atoms1 = atoms\n\n if atoms2 is None:\n atoms2 = vacuum_like(atoms1 if hkl2 is None else atoms)\n if hkl2 is not None:\n atoms = surface(atoms, hkl, 1)\n\n catoms2 = aperiodic_times(atoms2, repeat[1], axis=axis, direction=-1)\n catoms = aperiodic_times(atoms1, repeat[0], axis=axis)\n catoms = _stack([catoms, catoms2], axis=axis)\n\n out = stack( {'left': atoms1,\n 'central': catoms,\n 'right': atoms2}, axis=axis)\n return out\n\ndef stack(atomses:Dict[str,Atoms], axis:int, *args, inherit_cell=True, **kwargs):\n \"\"\" Stack the atoms along given axis to a one object, creating the regions in the atoms.\n\n The function accepts list of the names, the list of the Atoms objects, and all the other parameters\n of the function :func:`ase2sprkkr.ase.build.stack`.\n \"\"\"\n cnt = None\n out = _stack(atomses.values(), axis=axis, *args, **kwargs)\n last = list(atomses.items())[-1]\n\n if inherit_cell is True:\n inherit_cell = np.ones(3, dtype=bool)\n inherit_cell[axis]=False\n\n for name, atoms in atomses.items():\n if atoms is last:\n upto = None\n else:\n upto = (cnt or 0) + len(atoms)\n AtomsRegion.from_atoms(atoms, name, slice(cnt, upto), inherit_cell=inherit_cell, atoms=out)\n cnt=upto\n return out\n\n\ndef vacuum_like(atoms):\n \"\"\"\n Creates a copy of atoms, filled with vacuum pseudoatoms\n \"\"\"\n out = atoms.copy()\n SPRKKRAtoms.promote_ase_atoms(out)\n for site in out.sites:\n 
site.occupation = { 'Vc' : 1.0 }\n return out\n","repo_name":"ase2sprkkr/ase2sprkkr","sub_path":"src/ase2sprkkr/sprkkr/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"25"}
+{"seq_id":"6246801914","text":"import wav\nimport numpy as np\n\naudio_file = \"./original_book_36min.wav\"\nframe_size = 1000\nthreadhold = 1e-3\n\nsignal, sample_rate = wav.read_wav(audio_file)\n\nnum_of_frame, left_over = divmod(len(signal), frame_size)\n\noutput = []\nfor i in range(num_of_frame):\n current_frame = signal[i*frame_size:(i+1)*frame_size]\n energy = np.sum(np.square(current_frame))\n if energy >= threadhold:\n output.extend(current_frame.tolist())\n\nif left_over != 0:\n current_frame = signal[num_of_frame*frame_size:]\n energy = np.sum(np.square(current_frame))\n if energy >= threadhold:\n output.extend(current_frame.tolist())\n\nwav.write_wav(output, audio_file.replace(\".wav\", \"_eliminated.wav\"), sample_rate)","repo_name":"jeongchanYu/Audio_eliminate_empty_frame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"8182636205","text":"### DIFICIL ### DIFICIL ### DIFICIL ### DIFICIL ### DIFICIL ###\r\n\r\n''' Melhore o DESAFIO 61, perguntando para o usuário se ele quer mostrar\r\nmais alguns termos. O programa encerrará quando ele disser que quer mostrar 0 termos.'''\r\nt=int(input('Primeiro termo:'))\r\nr=int(input('Razão:'))\r\ntermo=t\r\nc=1\r\nmais =10\r\ntotal=0\r\nwhile mais != 0:\r\n total += mais\r\n while c <= total:\r\n print('{}'.format(termo),end=' ')\r\n c+=1\r\n termo += r\r\n print('pausa')\r\n mais=int(input('Quantos termos mostrar? :'))\r\nprint('Progressão finalizada com {} termos mostrados'.format(total))","repo_name":"SrLuidMessias/MeusCodigos","sub_path":"Exercicios/062-super progressao aritmetica v 3.0.py","file_name":"062-super progressao aritmetica v 3.0.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"8213598105","text":"import flatbuffers\nimport zmq\n\nimport registrar.Registrar.Command\nimport registrar.Registrar.Connect\nimport registrar.Registrar.Create\nimport registrar.Registrar.Message\n\nfrom .utils import send_request, serialize_mock_client, build_command\n\nSUB_PORT = '5555'\nREQ_PORT = '5556'\n\ncontext = zmq.Context()\n\nprint('Connecting to server...')\nreq_socket = context.socket(zmq.REQ)\nreq_socket.connect(\"tcp://localhost:%s\" % REQ_PORT)\n\nsub_socket = context.socket(zmq.SUB)\nsub_socket.connect('tcp://localhost:%s' % SUB_PORT)\n\ndef build_create_request(builder):\n client = serialize_mock_client(builder)\n\n name = builder.CreateString('New room')\n registrar.Registrar.Create.CreateStart(builder)\n registrar.Registrar.Create.CreateAddName(builder, name)\n registrar.Registrar.Create.CreateAddClient(builder, client)\n return registrar.Registrar.Create.CreateEnd(builder)\n\ndef read_create_response(command):\n union_create = registrar.Registrar.Create.Create()\n union_create.Init(command.Message().Bytes, command.Message().Pos)\n\n room = union_create.Room()\n print('CLIENT: Created room with guid ' + str(room.Guid()))\n return room.Guid()\n\ndef read_connect_response(command):\n union_connect = registrar.Registrar.Connect.Connect()\n union_connect.Init(command.Message().Bytes, command.Message().Pos)\n\n client = union_connect.Client()\n print('CLIENT: Another client joined my room with ip ' + str(client.Ip()))\n\n############ CREATE REQUEST ############\n\nbuilder = flatbuffers.Builder(1024)\ncreate_offset = build_create_request(builder)\noffset = build_command(\n builder,\n registrar.Registrar.Message.Message().Create,\n create_offset)\n\nprint('CLIENT: Sending a Create request')\nresponse = send_request(builder, req_socket, offset)\n\ncommand = registrar.Registrar.Command.Command.GetRootAsCommand(response, 0)\n\nif command.MessageType() == registrar.Registrar.Message.Message().Create:\n print('CLIENT: Received Create response from server')\n room_guid = read_create_response(command)\n sub_socket.setsockopt(zmq.SUBSCRIBE, room_guid)\nelse:\n print('ERROR: Expected Create message type but got another')\n\nwhile True:\n [address, contents] = sub_socket.recv_multipart()\n connect_command = registrar.Registrar.Command.Command.GetRootAsCommand(contents, 0)\n read_connect_response(connect_command)\n\nsub_socket.close()\ncontext.term()\n","repo_name":"dblarons/networks-project","sub_path":"registrar/mock_client/creator.py","file_name":"creator.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"72010084547","text":"Array = [0,1,2,6,3,1,4,5,6,7]\r\ndef maxLengthInOrder(nArray):\r\n '''Function that counts the largest ascending sequence inside a list \r\n '''\r\n longArray = [0,0] # array for pointers of max list \r\n p1 = 0 # start of current list\r\n \r\n for i in range(len(nArray)-1):\r\n if nArray[i+1] < nArray[i]: #Checks for increase \r\n if (i - p1) >= (longArray[1] - longArray[0]): \r\n longArray = [p1,i] #Checks the difference of two lists\r\n p1 = i + 1\r\n \r\n if (i - p1) >= (longArray[1] - longArray[0]): \r\n longArray = [p1,i+1]\r\n \r\n return nArray[longArray[0]:longArray[1]+1] \r\n\r\nprint(maxLengthInOrder(Array)) \r\n","repo_name":"Patrickj06/210CT-Coursework","sub_path":"Week5/Week5Q1recusion.py","file_name":"Week5Q1recusion.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"11740002498","text":"import numpy as np\nimport math\nfrom scipy.spatial.transform import Rotation\n\ndef correct_offset_rot(uncorrected_lat, uncorrected_lon, yaw, pitch, roll, altitude, altitude_offset, a, b, c):\n # Altitude\n relative_altitude = altitude - altitude_offset\n\n # Camera Attitude\n camera_pitch = a / 100\n camera_roll = b / 100\n camera_yaw = np.fmod((c / 100), 360)\n\n # Rotations\n platform_rotation = Rotation.from_euler('ZYX', (yaw, pitch, roll))\n sensor_rotation = Rotation.from_euler('ZYX', (camera_yaw, camera_pitch, camera_roll), degrees=True)\n\n # Combine rotations\n platform_sensor_rotation = (platform_rotation*sensor_rotation).inv().as_matrix()\n\n # Extract combined yaw-pitch-roll\n r = Rotation.from_matrix(platform_sensor_rotation)\n combined_angles = r.as_euler(\"zyx\")\n\n combined_yaw = combined_angles[0]\n combined_pitch = combined_angles[1]\n\n # Calculate magnitude\n theta = (math.pi / 2) - combined_pitch\n correction_magnitude_meters = relative_altitude * math.tan(theta)\n meter_as_dec_degrees = (1/111111) / math.cos(math.radians(uncorrected_lat))\n correction_magnitude_degrees = correction_magnitude_meters * meter_as_dec_degrees\n\n # Calculate direction\n correction_direction = (combined_yaw + 180) % 360\n\n # Calculate corrected lat-lon\n lat_correction = correction_magnitude_degrees * math.cos(correction_direction)\n lon_correction = correction_magnitude_degrees * math.sin(correction_direction)\n\n corrected_lat = uncorrected_lat + lat_correction\n corrected_lon = uncorrected_lon + lon_correction \n\n return corrected_lat, corrected_lon","repo_name":"tidehackathon/team-dstl-1","sub_path":"offset_correction/offset_correction.py","file_name":"offset_correction.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"25824656726","text":"import matplotlib.pyplot as plt\nfrom lxml import etree\nfrom commonroad.scenario.scenario import Scenario\nfrom collections import defaultdict\n\nfrom opendrive2lanelet.opendriveparser.elements.opendrive import OpenDrive\nfrom opendrive2lanelet.network import Network\nimport csv\n\nfrom opendrive2lanelet.opendriveparser.parser import parse_opendrive\n\n\ndef convert_opendrive(opendrive: OpenDrive) -> Scenario:\n road_network = Network()\n road_network.load_opendrive(opendrive)\n return road_network.export_commonroad_scenario() # commonroad-io==2020.2 版本需要验证\n\n\ndef get_basic_info(opendrive, scenario):\n # 获取 link与交叉口关系\n road_junction = {}\n for road in opendrive.roads:\n road_junction[road.id] = road.junction and road.junction.id # 此道路是在交叉口内部\n # print([k for k,v in road_junction.items() if v])\n\n # 获取道路与路段关系\n lanes_info = defaultdict(dict)\n for lane in scenario.lanelet_network.lanelets:\n # 获取所在路段\n lane_name = lane.lane_name\n road_id = int(lane_name.split('.')[0])\n lanes_info[lane.lanelet_id] = {\n \"road_id\": road_id,\n \"left\": {\n \"lane_id\": lane.adj_left,\n \"same_direction\": lane.adj_left_same_direction,\n },\n \"right\": {\n \"lane_id\": lane.adj_right,\n \"same_direction\": lane.adj_right_same_direction,\n },\n \"predecessor_ids\": lane.predecessor,\n \"successor_ids\": lane.successor,\n \"type\": lane.type,\n \"name\": lane.lane_name,\n \"center_vertices\": lane.center_vertices,\n \"left_vertices\": lane.left_vertices,\n \"right_vertices\": lane.right_vertices,\n }\n # print(lane_road_map)\n return lanes_info, road_junction\n\n\ndef get_color():\n color_list = ['y', 'b', 'g', 'r']\n i = 0\n while True:\n yield color_list[i % len(color_list)]\n i += 1\n\ncolor_c = get_color()\n\ndef show_lanes(f1, f2, scenario, lanes_info, road_junction):\n # 写入文件\n writer1 = csv.writer(f1)\n writer1.writerow([\"路段ID\", \"路段名称\", \"车道ID\", \"宽度(m)\", \"中心点序列\", \"左侧折点序列\", \"右侧折点序列\"])\n\n writer2 = csv.writer(f2)\n writer2.writerow([\"连接段ID\", \"起始路段ID\", \"起始车道ID\", \"目标路段ID\", \"目标车道ID\", \"中心点序列\", \"左侧折点序列\", \"右侧折点序列\"])\n\n for lane in scenario.lanelet_network.lanelets:\n x_list = []\n y_list = []\n # 获取所在路段\n road_id = lanes_info[lane.lanelet_id]['road_id']\n lane_name = lanes_info[lane.lanelet_id]['name']\n\n for coo in lane.center_vertices:\n # 绘制中心线\n x_list.append(coo[0])\n y_list.append(coo[1])\n\n center_string = ' '.join([\"({} {}) \".format(coo[0], coo[1]) for coo in lane.center_vertices])\n left_string = ' '.join([\"({} {}) \".format(coo[0], coo[1]) for coo in lane.left_vertices])\n right_string = ' '.join([\"({} {}) \".format(coo[0], coo[1]) for coo in lane.right_vertices])\n predecessor_ids = lane.predecessor\n successor_ids = lane.successor\n if road_junction.get(road_id) is None: # 区分此路段是否属于 junction, 同时 正常车道也有前后\n color = next(color_c)\n writer1.writerow([road_id, lane_name, lane.lanelet_id, '', center_string, left_string, right_string])\n else:\n color = next(color_c)\n for successor_id in successor_ids:\n for predecessor_id in predecessor_ids:\n writer2.writerow(\n [road_id, lanes_info[successor_id]['road_id'], successor_id,\n lanes_info[predecessor_id]['road_id'], predecessor_id,\n center_string, left_string, right_string])\n plt.plot(x_list, y_list, color=color, linestyle=\"\", marker=\".\", linewidth=1)\n f1.close()\n f2.close()\n plt.show()\n\n\nif __name__ == \"__main__\":\n xodr_file = \"test.xodr\"\n\n with open(xodr_file, \"r\") as file_in:\n obj = etree.parse(file_in).getroot()\n opendrive = parse_opendrive(obj)\n # ps: 
在 junction 里面也会有 lane_name\n scenario = convert_opendrive(opendrive) # 这一步删除了过多的历史信息,需要手动更改源码\n\n # 提取基础信息\n lanes_info, road_junction = get_basic_info(opendrive, scenario)\n\n f1 = open(\"车道.csv\", 'w', newline='')\n f2 = open(\"车道连接.csv\", 'w', newline='')\n\n # 获取道路详情并写入文件\n show_lanes(f1, f2, scenario, lanes_info, road_junction)\n\n # 输出为 xml文件 commroad格式, 需要更改 commonroad-io 版本\n # path = \"test1.xml\"\n # from opendrive2lanelet.io.extended_file_writer import ExtendedCommonRoadFileWriter\n # with open(path, \"w\") as fh:\n # writer = ExtendedCommonRoadFileWriter(\n # scenario,\n # source=\"OpenDRIVE 2 Lanelet Converter\",\n # )\n # writer.write_scenario_to_file_io(fh)\n","repo_name":"mingwu123123/Opendrive2TESS","sub_path":"opendrive2tess/xodr2csv.py","file_name":"xodr2csv.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"5439731975","text":"#! usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nimport os\n\nSERVER_URL = \"http://localhost:3030\"\nDATASET_URL = SERVER_URL + \"/webdata-project-kb\"\nSPARQL_ENDPOINT = DATASET_URL + \"/sparql\"\nQUERY_ENDPOINT = DATASET_URL + \"/query\"\nSPARQL_UPDATE = DATASET_URL + \"/update\"\nGRAPH_STORE = DATASET_URL + \"/data\"\nHEADERS_QUERY = {'Content-type': 'application/sparql-query'}\nHEADERS_UPDATE = {'Content-type': 'application/sparql-update'}\n\nqueries_folder = os.path.join(os.path.dirname(__file__), \"./queries/\")\nstation_json = os.path.join(os.path.dirname(__file__), \"./datasets/stations.jsonld\")\nmonument_json = os.path.join(os.path.dirname(__file__), \"./datasets/monuments.jsonld\")\nmusee_json = os.path.join(os.path.dirname(__file__), \"./datasets/musees.jsonld\")\nloc_ontology = os.path.join(os.path.dirname(__file__), \"../ontology/project-ontology.ttl\")\n\n# query to delete all the rows in the knowledge base\ndef deleteQuery():\n \"\"\"\n Returns a string with a query deleting all triples in the KB\n \"\"\"\n return \"\"\"\n PREFIX rdf: \n PREFIX ns: \n DELETE\n {\n ?s ?p ?o\n }\n WHERE\n {\n ?s ?p ?o\n }\n \"\"\"\n\ndef insertQuery(s, p, o):\n \"\"\"\n Inserts a triple into the KB\n Input: s -> subject p -> predicate o -> object\n \"\"\"\n\n return \"\"\"\n PREFIX rdf: \n PREFIX ns: \n INSERT DATA\n {\n \"\"\"+ s + ' ' + p + ' ' + o + \"\"\" .\n }\n \"\"\"\n\ndef queryFromFile(filename):\n \"\"\"\n Asks the server (Fuseki triplestore) using a .txt file containing a SPARQL query\n Input: query file path\n \"\"\"\n print(\"Opening query file\")\n with open(os.path.join(os.path.dirname(__file__), \"./queries/\") + filename, 'r') as query:\n queryString = \"\".join(query.readlines())\n print(\"Query :\\n\", queryString)\n\n \n rep = requests.post(DATASET_URL, data=queryString, headers=HEADERS_QUERY)\n print(\"Posted request.\")\n if rep.status_code != 200:\n rep.raise_for_status()\n data = rep.json()\n print(\"Got results\")\n return data[\"results\"][\"bindings\"]\n\n\n\ndef deleteDefaultGraph():\n \"\"\"\n Deletes all triples in the default graph\n \"\"\"\n rep = requests.post(DATASET_URL, data=deleteQuery(), headers=HEADERS_UPDATE)\n if rep.status_code != 204:\n rep.raise_for_status()\n return rep\n\ndef insertEntries(filename):\n \"\"\"\n Adds entries from a JSON-LD file to the triple store in the default graph\n Input: the path to the JSON-LD file\n \"\"\"\n data = open(filename).read()\n headers = {'Content-Type': 'application/ld+json'}\n r = requests.post(GRAPH_STORE + '?default', data=data, headers=headers)\n if r.status_code != 204:\n r.raise_for_status()\n else:\n print(\"Status response\",r)\n return r\n\ndef insertOntology():\n \"\"\"\n Inserts the ontology into the default graph in Fuseki\n \"\"\"\n data = open(loc_ontology).read()\n headers = {'Content-Type': 'text/turtle'}\n r = requests.post(GRAPH_STORE + '?default', data=data, headers=headers)\n if r.status_code != 204:\n r.raise_for_status()\n else:\n print(\"Status response\",r)\n\ndef main():\n # to insert data got previously in the triplestore\n # file must be runned in the very current directory\n #deleteDefaultGraph()\n insertOntology()\n insertEntries(monument_json)\n insertEntries(musee_json)\n insertEntries(station_json)\n\n #queryFromFile(queries_folder + \"get-graph-names.txt\")\n #monuments= queryFromFile(\"get-monuments.txt\")\n #for i in range(10):\n # print(monuments[i])\n # print(monuments[i]['lat']['value'])\n \n\nif __name__ == \"__main__\":\n 
main()","repo_name":"pauljouet/web-data-project","sub_path":"server/fuseki_managements/manage_fuseki.py","file_name":"manage_fuseki.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"1008034922","text":"from __future__ import division, print_function\n\n# Import Python modules\nimport sys\nimport random\n\n# Import Broadband modules\nimport bband_utils\n\ndef calculate_rvfac(mean_rvfac, range_rvfac, seed):\n \"\"\"\n This function calculates a random rvfac value based on the mean\n and range values, plus a seed to generate a random number\n \"\"\"\n random.seed(seed)\n rvfac = mean_rvfac + range_rvfac * ((random.random() * 2) - 1)\n return rvfac\n\nclass GenslipCfg(object):\n \"\"\"\n Define the configuration parameters for the GP rupture generator\n \"\"\"\n\n def __init__(self, a_srcname=None):\n \"\"\"\n Sets basic class parameters, then parses a_srcname for more information\n \"\"\"\n\n # User defined parms\n self.SLIP_SIGMA = 0.75\n # This is now the default inside genslip-3.3, so don't need to use it\n # self.RAND_RAKE_RANGE = 60\n\n self.RTDEP = 6.5\n self.RTDEP_RANGE = 1.5\n self.MEAN_RVFAC = 0.8\n self.RANGE_RVFAC = 0.05\n self.SHAL_VRUP = 0.6\n\n # Default RISETIME_COEF set for western US simulations,\n # override in velocity model config file. This parameter used\n # to be set to 1.6, but was modified by RWG in November 2013\n # when the Rupture Generator was updated to version 3.3. The\n # value was reset to 1.6 for Genslip 5.0.1\n self.RISETIME_COEF = 1.6\n\n # self.EXTRA_RTFAC = 0.0\n self.RISETIME_FAC = 2\n self.RT_SCALEFAC = 1\n self.RT_RAND = 0\n\n # As in genslip-3.3, we are using 'Mliu' stype, which is the default\n # self.STYPE = \"ucsb\"\n\n # Extra parameters in genslip-3.3, updated for genslip-5.0.1\n self.SLIP_WATER_LEVEL = -1\n self.DEEP_RISETIMEDEP = 17.5\n self.DEEP_RISETIMEDEP_RANGE = 2.5\n self.DEEP_RISETIME_FAC = 2.0\n\n # Read SRC FILE\n if a_srcname:\n self.CFGDICT = bband_utils.parse_src_file(a_srcname)\n\nif __name__ == \"__main__\":\n ME = GenslipCfg()\n print(\"Created Test Config Class: %s\" % (sys.argv[0]))\n","repo_name":"taodongwang/bbp","sub_path":"bbp/comps/genslip_cfg.py","file_name":"genslip_cfg.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"25"}
+{"seq_id":"38622084244","text":"import os\nimport time\n\n\n\nREQUEST_LOG_ID = 'REQUEST_LOG_ID'\n\n\n_U_SEC = 1000000\n\nLOG_LEVEL_DEBUG = 0\nLOG_LEVEL_INFO = 1\nLOG_LEVEL_WARNING = 2\nLOG_LEVEL_ERROR = 3\nLOG_LEVEL_CRITICAL = 4\n\nLOG_LEVELS = [LOG_LEVEL_DEBUG,\n LOG_LEVEL_INFO,\n LOG_LEVEL_WARNING,\n LOG_LEVEL_ERROR,\n LOG_LEVEL_CRITICAL]\n\n\n\n_DEFAULT_LEVEL = LOG_LEVEL_ERROR\n\n\ndef _CurrentTimeMicro():\n return int(time.time() * _U_SEC)\n\n\ndef _Clean(e):\n return e.replace('\\0', '\\n')\n\n\ndef RequestID():\n \"\"\"Returns the ID of the current request assigned by App Engine.\"\"\"\n return os.environ.get(REQUEST_LOG_ID, None)\n\n\ndef _StrictParseLogEntry(entry):\n \"\"\"Parses a single log entry emitted by app_logging.AppLogsHandler.\n\n Parses a log entry of the form LOG where the\n level is in the range [0, 4]. If the entry is not of that form, ValueError is\n raised.\n\n Args:\n entry: The log entry to parse.\n\n Returns:\n A (timestamp, level, message) tuple.\n\n Raises:\n ValueError: if the entry failed to be parsed.\n \"\"\"\n magic, level, timestamp, message = entry.split(' ', 3)\n if magic != 'LOG':\n raise ValueError()\n\n timestamp, level = int(timestamp), int(level)\n if level not in LOG_LEVELS:\n raise ValueError()\n\n return timestamp, level, _Clean(message)\n\n\ndef ParseLogEntry(entry):\n \"\"\"Parses a single log entry emitted by app_logging.AppLogsHandler.\n\n Parses a log entry of the form LOG where the\n level is in the range [0, 4]. If the entry is not of that form, take the whole\n entry to be the message. Null characters in the entry are replaced by\n newlines.\n\n Args:\n entry: The log entry to parse.\n\n Returns:\n A (timestamp, level, message) tuple.\n \"\"\"\n try:\n return _StrictParseLogEntry(entry)\n except ValueError:\n\n return _CurrentTimeMicro(), _DEFAULT_LEVEL, _Clean(entry)\n\n\ndef ParseLogs(logs):\n \"\"\"Parses a str containing newline separated log entries.\n\n Parses a series of log entries in the form LOG \n where the level is in the range [0, 4]. Null characters in the entry are\n replaced by newlines.\n\n Args:\n logs: A string containing the log entries.\n\n Returns:\n A list of (timestamp, level, message) tuples.\n \"\"\"\n return [ParseLogEntry(line) for line in logs.split('\\n') if line]\n","repo_name":"alangpierce/appengine-python3","sub_path":"google/appengine/api/logservice/logsutil.py","file_name":"logsutil.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"25"}
+{"seq_id":"15932138686","text":"from winreg import *\n\nkey_to_read = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'\n\ntry:\n reg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)\n k = OpenKey(reg, key_to_read)\n\n print(\"ok found\")\n print(k)\n\nexcept:\n print(\"sorry not found\")\n","repo_name":"godokanwolf/first_myproj","sub_path":"3 copy.py","file_name":"3 copy.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"37736775813","text":"'''program to find the the largest contiguous sub array in a given array'''\nlist1 = list(map(int,input(\"enter the list:\").split()))\nreal_sum = 0 \ntemp = 0 \nflag = 0\ntemp1 = 0\nif list1[0] < 0 :\n temp = list1[0]\nfor i in list1:\n if i < 0 and i > temp:\n temp = i\n flag = -1\n\n temp1 = temp1 + i\n\n if temp1 < 0:\n temp1 = 0\n if temp1 >=real_sum:\n real_sum = temp1 \n\nif flag is -1:\n print(\"only for negative number! {}\".format(temp))\nelse:\n print(\"the largest sub-arry is {}\".format(real_sum)) ","repo_name":"satadhi/basicpython","sub_path":"kadane_ algo.py","file_name":"kadane_ algo.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"32143953707","text":"#!/usr/bin/env python3\n#Junior Design Robot\n#LED Test tool\n#Nick Ames 2018\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nimport signal\nimport os\nimport sys\nimport gen_files\n\nSoftwareDir=os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))\nsys.path.append(os.path.abspath(SoftwareDir) + \"/python-library/\")\nimport robolib\n\ndef add_slider(layout, labelstr, func, extra=None):\n\tlbl = QLabel(labelstr)\n\tsl = QSlider(Qt.Horizontal)\n\tsl.setMinimum(0)\n\tsl.setMaximum(255)\n\tsl.setTracking(True)\n\tsl.valueChanged.connect(func)\n\tif extra: sl.valueChanged.connect(extra)\n\tlayout.addRow(lbl, sl)\n\ndef add_spinbox(layout, labelstr, func, extra=None):\n\tlbl = QLabel(labelstr)\n\tsp = QDoubleSpinBox()\n\tsp.setMinimum(-10)\n\tsp.setMaximum(10)\n\tsp.valueChanged.connect(func)\n\tif extra: sl.valueChanged.connect(extra)\n\tlayout.addRow(lbl, sp)\n\ndef setup_controls(window, protocol):\n\t\"\"\"Setup the window controls.\"\"\"\n\twindow.setWindowTitle(\"Motor Tool\")\n\tww = QWidget(window)\n\tfl = QFormLayout(ww)\n\tadd_slider(fl, \"Left\", robolib.set_motor_l_target)\n\tadd_slider(fl, \"Right\", robolib.set_motor_r_target)\n\tadd_slider(fl, \"Both\", robolib.set_motor_r_target, extra=robolib.set_motor_l_target)\n\t\n\tadd_spinbox(fl, \"Kp\", robolib.set_motor_kp)\n\tadd_spinbox(fl, \"Ki\", robolib.set_motor_ki)\n\tadd_spinbox(fl, \"Kd\", robolib.set_motor_kd)\n\tww.setLayout(fl)\n\twindow.setCentralWidget(ww)\n\t\ndef main():\n\trobolib.init()\n\tsignal.signal(signal.SIGINT, signal.SIG_DFL) #Make Ctrl-C quit the program\n\tp = gen_files.parse_sheet(gen_files.SpreadsheetPath)\n\tapp = QApplication(sys.argv)\n\tw = QMainWindow()\n\tsetup_controls(w, p)\n\tw.show()\n\tsys.exit(app.exec_())\n\t\nif __name__ == \"__main__\":\n\tmain()\n\t","repo_name":"NickAmes/jdrobot","sub_path":"protocol/test-motor.py","file_name":"test-motor.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"32837406188","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nclass PDBParseData:\n ss_dis = {\n '104L_B': {\n 'disorder': '--------------------------------------------------------------------------------------------------------------------------------------------------------------------XX',\n 'secstr': ' HHHHHHHHH SB EE TTS EE TTT SS HHHHHHHHHHHS S TTB HHHHHHHHHHHHHHHHHHHHT TTHHHHHHHS SSHHHHHHHHHHHH HHHHHH HHHHHHHHTT TTHHHHHHTSSHHHHHS HHHHHHHHHHHS SGGGG ',\n 'sequence': 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSAAELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},\n '104L_A': {\n 'disorder': '--------------------------------------------------------------------------------------------------------------------------------------------------------------------XX',\n 'secstr': ' HHHHHHHHT SB EE TTS EEETTTEEEE TT HHHHHHHHHHHHTS TTB HHHHHHHHHHHHHHHHHHHTT TTTHHHHHHS HHHHHHHHHHHHHHHHHHHHT HHHHHHTTTT HHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGG ',\n 'sequence': 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSAAELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},\n '11BG_A': {\n 'disorder': '----------------------------------------------------------------------------------------------------------------------------',\n 'secstr': ' HHHHHHHHHB SSTT GGGHHHHHHHHTT SSS SEEEEE S HHHHHGGGGSEEE TTS S EEE SS EEEEEEEE TT BTTB EEEEEEEE EEEEEETTTTEEEEEEEE ',\n 'sequence': 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV'},\n '11BG_B': {\n 'disorder': '----------------------------------------------------------------------------------------------------------------------------',\n 'secstr': ' HHHHHHHHHB TT TT GGGHHHHHHHHTT SSSS SEEEEE S HHHHHGGGGSEEE SSS S EEE SS EEEEEEEE TT BTTB EEEEEEEE EEEEEETTTTEEEEEEEE ',\n 'sequence': 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV'},\n '102L_A': {\n 'disorder': '-------------------------------------------------------------------------------------------------------------------------------------------------------------------XX',\n 'secstr': ' HHHHHHHHH EEEEEE TTS EEEETTEEEESSS TTTHHHHHHHHHHTS TTB HHHHHHHHHHHHHHHHHHHHH TTHHHHHHHS HHHHHHHHHHHHHHHHHHHHT HHHHHHHHTT HHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGG ',\n 'sequence': 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'}}\n filter_pdb_chain_uniprot_input = {\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',\n 7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A',\n 12: 'A', 13: 'A', 14: 'A', 15: 'A', 16: 'A', 17: 'B', 18: 'C',\n 19: 'A', 20: 'A', 21: 'A', 22: 'A'},\n 'PDB_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 1, 5: 1, 6: 1, 7: 45, 8: 1,\n 9: 45, 10: 1, 11: 1, 12: 0, 13: 0, 14: 1,\n 15: 0, 16: 1, 17: 1, 18: 1, 19: 22, 20: 343, 21: 22,\n 22: 391},\n 'SP_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,\n 9: 45, 10: 2, 11: 2, 12: 1, 13: 1, 14: 1,\n 15: 1, 16: 2, 17: 2, 18: 2, 19: 22, 20: 126, 21: 22,\n 22: 126},\n 'SP_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,\n 8: 44, 9: 164, 10: 155, 11: 154, 12: 3,\n 13: 154, 14: 164, 15: 154, 16: 210, 17: 210, 18: 210,\n 19: 342, 20: 200, 21: 390, 22: 200},\n 
'RES_BEG': {\n 0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,\n 9: 47, 10: 1, 11: -1, 12: 1, 13: 1, 14: 1,\n 15: 1, 16: 1, 17: 1, 18: 1, 19: 5, 20: 326, 21: 5, 22: 374},\n 'PDB_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 124, 5: 124, 6: 44, 7: 164,\n 8: 44, 9: 164, 10: 153, 11: 153,\n 12: 153, 13: 153, 14: 164, 15: 153, 16: 209, 17: 209,\n 18: 209, 19: 342, 20: 417, 21: 390, 22: 465},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',\n 4: 'P00669', 5: 'P00669', 6: 'P00720',\n 7: 'P00720', 8: 'P00720', 9: 'P00720', 10: 'P02185',\n 11: 'P02185', 12: 'P02185', 13: 'P02185',\n 14: 'P00720', 15: 'P02185', 16: 'P09211', 17: 'P09211',\n 18: 'P09212', 19: 'B3DIN1', 20: 'Q4G1L2',\n 21: 'B3DIN1', 22: 'Q4G1L2'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,\n 8: 44, 9: 166, 10: 153, 11: 153, 12: 3,\n 13: 154, 14: 164, 15: 154, 16: 209, 17: 209, 18: 209,\n 19: 325, 20: 400, 21: 373, 22: 448},\n 'PDB': {\n 0: '102l', 1: '102l', 2: '103l', 3: '103l', 4: '11bg',\n 5: '11bg', 6: '104l', 7: '104l', 8: '104l',\n 9: '104l', 10: '104m', 11: '105m', 12: '106m', 13: '108m',\n 14: '109l', 15: '109m', 16: '10gs',\n 17: '10gs', 18: '10gs', 19: '3v44', 20: '3v44', 21: '3v47',\n 22: '3v47'}\n }\n\n filter_pdb_chain_uniprot_expected = {\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',\n 7: 'A', 8: 'B', 9: 'B', 19: 'A', 20: 'A', 21: 'A', 22: 'A'},\n 'SP_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,\n 9: 45, 19: 22, 20: 126, 21: 22, 22: 126},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',\n 5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',\n 19: '3V44', 20: '3V44', 21: '3V47', 22: '3V47'},\n 'RES_BEG': {\n 0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,\n 9: 47, 19: 5, 20: 326, 21: 5, 22: 374},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',\n 4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',\n 8: 'P00720', 9: 'P00720', 19: 'B3DIN1', 20: 'Q4G1L2',\n 21: 'B3DIN1', 22: 'Q4G1L2'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,\n 8: 44, 9: 166, 19: 325, 20: 400, 21: 373, 22: 448},\n 'SP_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,\n 8: 44, 9: 164, 19: 342, 20: 200, 21: 390, 22: 200}\n }\n\n add_pdbseq_to_df_input = {\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',\n 7: 'A', 8: 'B', 9: 'B', 19: 'A', 20: 'A', 21: 'A', 22: 'A'},\n 'SP_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,\n 9: 45, 19: 22, 20: 126, 21: 22, 22: 126},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',\n 5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',\n 19: '3V44', 20: '3V44', 21: '3V47', 22: '3V47'},\n 'RES_BEG': {\n 0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,\n 9: 47, 19: 5, 20: 326, 21: 5, 22: 374},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',\n 4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',\n 8: 'P00720', 9: 'P00720', 19: 'B3DIN1', 20: 'Q4G1L2',\n 21: 'B3DIN1', 22: 'Q4G1L2'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,\n 8: 44, 9: 166, 19: 325, 20: 400, 21: 373, 22: 448},\n 'SP_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,\n 8: 44, 9: 164, 19: 342, 20: 200, 21: 390, 22: 200}\n }\n\n add_pdbseq_to_df_expected = {\n 'PDB_SEQ': {\n 0: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN',\n 1: 
'AAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',\n 2: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',\n 3: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',\n 4: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',\n 5: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',\n 6: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',\n 7: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'B', 4: 'A', 5: 'A', 6: 'B',\n 7: 'B'},\n 'SP_BEG': {0: 1, 1: 41, 2: 27, 3: 27, 4: 1, 5: 45, 6: 1, 7: 45},\n 'SP_END': {0: 40, 1: 164, 2: 150, 3: 150, 4: 44, 5: 164, 6: 44, 7: 164},\n 'RES_BEG': {0: 1, 1: 42, 2: 1, 3: 1, 4: 1, 5: 47, 6: 1, 7: 47},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00669',\n 4: 'P00720', 5: 'P00720', 6: 'P00720', 7: 'P00720'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 124, 3: 124, 4: 44, 5: 166, 6: 44,\n 7: 166},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '11BG', 3: '11BG', 4: '104L',\n 5: '104L', 6: '104L', 7: '104L'}\n }\n\n filter_single_pdb_chain_sep_input = {\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',\n 7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A',\n 14: 'A'},\n 'SP_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,\n 9: 45, 10: 22, 11: 126, 12: 22, 13: 126, 14: 1},\n 'SP_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,\n 8: 44, 9: 164, 10: 342, 11: 200, 12: 390, 13: 200, 14: 185},\n 'RES_BEG': {\n 0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,\n 9: 47, 10: 5, 11: 326, 12: 5, 13: 374, 14: 1},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',\n 4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',\n 8: 'P00720', 9: 'P00720', 10: 'B3DIN1', 11: 'Q4G1L2',\n 12: 'B3DIN1', 13: 'Q4G1L2', 14: 'P00718'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,\n 8: 44, 9: 166, 10: 325, 11: 400, 12: 373, 13: 448, 14: 185},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',\n 5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',\n 10: '3V44', 11: '3V44', 12: '3V47', 13: '3V47', 14: '154L'}\n }\n\n filter_single_pdb_chain_sep_expected = {\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',\n 7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A'},\n 'SP_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,\n 9: 45, 10: 22, 11: 126, 12: 22, 13: 126},\n 'SP_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,\n 8: 44, 9: 164, 10: 342, 11: 200, 12: 390, 13: 200},\n 'RES_BEG': {\n 0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,\n 9: 47, 10: 5, 11: 326, 12: 5, 13: 374},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',\n 4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',\n 8: 'P00720', 9: 'P00720', 10: 'B3DIN1', 11: 'Q4G1L2',\n 12: 'B3DIN1', 13: 'Q4G1L2'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,\n 8: 44, 9: 166, 10: 325, 11: 400, 12: 373, 13: 448},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',\n 5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',\n 10: '3V44', 11: '3V44', 12: '3V47', 
13: '3V47'}\n }\n\n filter_single_pdb_chain_input = {\n 'SP_PRIMARY': {0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00720'},\n 'SEC_STRUCT': {\n 0: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',\n 1: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',\n 2: '-XXXPPPHHHHHHHHHBPPPSSTTPGGGHHHHHHHHTTPPSSSPPSEEEEEPSPHHHHHGGGGSEEEPPTTSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTXXXXXXXXP',\n 3: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX'},\n 'PDB_CHAIN': {0: '104L_A', 1: '104L_B', 2: '11BG_A', 3: '102L_A'}\n }\n\n filter_single_pdb_chain_expected = {\n 'SP_PRIMARY': {0: 'P00720', 1: 'P00720', 2: 'P00720'}, 'SEC_STRUCT': {\n 0: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',\n 1: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',\n 2: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX'},\n 'PDB_CHAIN': {0: '104L_A', 1: '104L_B', 2: '102L_A'}\n }\n\n compare_to_uni_input = {\n 'PDB_SEQ': {0: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN',\n 1: 'AAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',\n 2: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',\n 3: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',\n 4: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',\n 5: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',\n 6: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',\n 7: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'B', 4: 'A', 5: 'A', 6: 'B', 7: 'B'},\n 'SP_BEG': {0: 1, 1: 41, 2: 27, 3: 27, 4: 1, 5: 45, 6: 1, 7: 45},\n 'SP_END': {\n 0: 40, 1: 164, 2: 150, 3: 150, 4: 44, 5: 164, 6: 44, 7: 164},\n 'RES_BEG': {0: 1, 1: 42, 2: 1, 3: 1, 4: 1, 5: 47, 6: 1, 7: 47},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00669',\n 4: 'P00720', 5: 'P00720', 6: 'P00720', 7: 'P00720'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 124, 3: 124, 4: 44, 5: 166, 6: 44,\n 7: 166},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '11BG', 3: '11BG', 4: '104L',\n 5: '104L', 6: '104L', 7: '104L'}\n }\n\n compare_to_uni_expected = {\n 'CHAIN': {0: 'A', 1: 'B'},\n 'SP_BEG': {0: 27, 1: 27},\n 'SP_END': {0: 150, 1: 150},\n 'RES_BEG': {0: 1, 1: 1},\n 'SP_PRIMARY': {0: 'P00669', 1: 'P00669'},\n 'RES_END': {0: 124, 1: 124},\n 'PDB': {0: '11BG', 1: '11BG'}\n }\n\n read_pdb_chain_uniprot_uniIDs_input = {\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'A', 4: 'A', 5: 'B', 6: 'A',\n 7: 'A', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A',\n 14: 'A'},\n 'SP_BEG': {\n 0: 1, 1: 41, 2: 1, 3: 41, 4: 27, 5: 27, 6: 1, 7: 45, 8: 1,\n 9: 45, 
10: 22, 11: 126, 12: 22, 13: 126, 14: 1},\n 'SP_END': {\n 0: 40, 1: 164, 2: 40, 3: 164, 4: 150, 5: 150, 6: 44, 7: 164,\n 8: 44, 9: 164, 10: 342, 11: 200, 12: 390, 13: 200, 14: 185},\n 'RES_BEG': {\n 0: 1, 1: 42, 2: 1, 3: 44, 4: 1, 5: 1, 6: 1, 7: 47, 8: 1,\n 9: 47, 10: 5, 11: 326, 12: 5, 13: 374, 14: 1},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00720', 3: 'P00720',\n 4: 'P00669', 5: 'P00669', 6: 'P00720', 7: 'P00720',\n 8: 'P00720', 9: 'P00720', 10: 'B3DIN1', 11: 'Q4G1L2',\n 12: 'B3DIN1', 13: 'Q4G1L2', 14: 'P00718'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 40, 3: 167, 4: 124, 5: 124, 6: 44, 7: 166,\n 8: 44, 9: 166, 10: 325, 11: 400, 12: 373, 13: 448, 14: 185},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '103L', 3: '103L', 4: '11BG',\n 5: '11BG', 6: '104L', 7: '104L', 8: '104L', 9: '104L',\n 10: '3V44', 11: '3V44', 12: '3V47', 13: '3V47', 14: '154L'}\n }\n\n create_pdb_composite_input = {\n 'PDB_SEQ': {\n 0: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN',\n 1: 'AAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',\n 2: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',\n 3: 'KESAAAKFERQHMDSGNSPSSSSNYCNLMMCCRKMTQGKCKPVNTFVHESLADVKAVCSQKKVTCKNGQTNCYQSKSTMRITDCRETGSSKYPNCAYKTTQVEKHIIVACGGKPSVPVHFDASV',\n 4: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',\n 5: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL',\n 6: 'MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKS',\n 7: 'ELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL'},\n 'CHAIN': {\n 0: 'A', 1: 'A', 2: 'A', 3: 'B',\n 4: 'A', 5: 'A', 6: 'B', 7: 'B'},\n 'SP_BEG': {0: 1, 1: 41, 2: 27, 3: 27, 4: 1, 5: 45, 6: 1, 7: 45},\n 'SP_END': {\n 0: 40, 1: 164, 2: 150, 3: 150, 4: 44,\n 5: 164, 6: 44, 7: 164},\n 'RES_BEG': {0: 1, 1: 42, 2: 1, 3: 1, 4: 1, 5: 47, 6: 1, 7: 47},\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00720', 2: 'P00669', 3: 'P00669',\n 4: 'P00720', 5: 'P00720', 6: 'P00720', 7: 'P00720'},\n 'RES_END': {\n 0: 40, 1: 165, 2: 124, 3: 124, 4: 44, 5: 166,\n 6: 44, 7: 166},\n 'PDB': {\n 0: '102L', 1: '102L', 2: '11BG', 3: '11BG', 4: '104L',\n 5: '104L', 6: '104L', 7: '104L'}\n }\n\n create_pdb_composite_expected = {\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00669', 2: 'P00720', 3: 'P00720',\n 4: 'P00669'},\n 'SEC_STRUCT': {\n 0: 'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',\n 1: '--------------------------PPPHHHHHHHHHBPTTPPTTPGGGHHHHHHHHTTPSSSSPPSEEEEEPSPHHHHHGGGGSEEEPPSSSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP',\n 2: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',\n 3: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',\n 4: '--------------------------PPPHHHHHHHHHBPPPSSTTPGGGHHHHHHHHTTPPSSSPPSEEEEEPSPHHHHHGGGGSEEEPPTTSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP'},\n 'PDB_CHAIN': {\n 0: '104L_B', 1: '11BG_B', 2: '102L_A', 3: '104L_A',\n 4: '11BG_A'}\n }\n\n create_uni_struct_input = {\n 'SP_PRIMARY': {\n 0: 'P00720', 1: 'P00669', 2: 'P00720',\n 3: 'P00720', 4: 'P00669'},\n 'SEC_STRUCT': {\n 0: 
'PPHHHHHHHHHPPPSBPEEPTTSPEEPTTTPPPPPPSSPPHHHHHHHHHSPSPPTTBPPHHHHHHHHHHHHHHHHHHHHTPTTHHHHHHHSPSSHHHHHHHHHHHHPHHHHHHPHHHHHHHHTTPTTHHHHHHTSSHHHHHSPHHHHHHHHHHHSPSGGGGPXX',\n 1: '--------------------------PPPHHHHHHHHHBPTTPPTTPGGGHHHHHHHHTTPSSSSPPSEEEEEPSPHHHHHGGGGSEEEPPSSSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP',\n 2: 'PPHHHHHHHHHPPEEEEEEPTTSPEEEETTEEEESSSPTTHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHHHPTTHHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHHHTTPHHHHHHHHHSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',\n 3: 'PPHHHHHHHHTPPPSBPEEPTTSPEEETTTEEEEPPTTPPHHHHHHHHHHTSPPTTBPPHHHHHHHHHHHHHHHHHHHTTPTTTHHHHHHSPHHHHHHHHHHHHHHHHHHHHTPHHHHHHTTTTPHHHHHHHTTSSHHHHHSHHHHHHHHHHHHHSSSGGGPXX',\n 4: '--------------------------PPPHHHHHHHHHBPPPSSTTPGGGHHHHHHHHTTPPSSSPPSEEEEEPSPHHHHHGGGGSEEEPPTTSPSPEEEPSSPEEEEEEEEPTTPBTTBPPEEEEEEEEPEEEEEETTTTEEEEEEEEP'},\n 'Unnamed: 0': {0: 0, 1: 1, 2: 2, 3: 3, 4: 4},\n 'PDB_CHAIN': {\n 0: '104L_B', 1: '11BG_B', 2: '102L_A',\n 3: '104L_A', 4: '11BG_A'}\n }\n\n create_uni_struct_expected = {\n 'SP_PRIMARY': {0: 'P00720', 1: 'P00669'},\n 'STRUCT': {\n 0: 'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXX',\n 1: '--------------------------OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'}\n }\n\n create_intervals_pdb_df = {\n 'SP_PRIMARY': {\n 0: 'Q5SLQ0', 1: 'Q5SLQ0', 2: 'Q805F6', 3: 'Q5SLQ0',\n 4: 'Q5SLQ0', 5: 'Q5SLQ0', 6: 'Q5SLQ0', 7: 'Q5SLQ0',\n 8: 'Q5SLQ0', 9: 'Q5SLQ0', 10: 'Q5SLQ0', 11: 'Q5SLQ0',\n 12: 'Q5SLQ0', 13: 'Q5SLQ0', 14: 'Q805F6', 15: 'Q5SLQ0',\n 16: 'Q5SLQ0', 17: 'Q5SLQ0', 18: 'Q5SLQ0', 19: 'Q5SLQ0',\n 20: 'Q5SLQ0', 21: 'Q5SLQ0', 22: 'Q5SLQ0', 23: 'Q5SLQ0',\n 24: 'Q5SLQ0', 25: 'Q5SLQ0', 26: 'Q5SLQ0', 27: 'Q5SLQ0',\n 28: 'Q5SLQ0', 29: 'Q5SLQ0', 30: 'Q5SLQ0', 31: 'Q5SLQ0',\n 32: 'Q5SLQ0', 33: 'Q5SLQ0', 34: 'Q5SLQ0', 35: 'Q5SLQ0',\n 36: 'Q5SLQ0', 37: 'Q5SLQ0', 38: 'Q5SLQ0', 39: 'Q5SLQ0',\n 40: 'Q5SLQ0', 41: 'Q5SLQ0', 42: 'Q5SLQ0', 43: 'Q5SLQ0',\n 44: 'Q5SLQ0', 45: 'Q5SLQ0', 46: 'Q5SLQ0', 47: 'Q5SLQ0',\n 48: 'Q5SLQ0', 49: 'Q5SLQ0'},\n 'SEC_STRUCT': {\n 0: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 1: 'XXXXXXXXXXXXXXXPPPSPPHHHHSPSEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPBPPP',\n 2: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXPPTTEETTTTEEPTTPSPSSSTTEETTEEPPTTPEEEPPSSSSPPEEPPSSPSSPPPPPXX-',\n 3: '-XXXXXXXXXXXXXXPPPSPPTTTSSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 4: '------------------PPPSTTTSPSPPSSPTTPHHHHHHTBPTTPPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 5: 'XXXXXXXXXXXXXXXXXPPPPTTTTPPSEETTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 6: 'XXXXXXXXXXXXXXXXXXPPPGGGSPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 7: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPPEETTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 8: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 9: '-XXXXXXXXXXXXXXPPPSPPTTTSSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 
10: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPSEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 11: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEESSPSSPPTTTGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 12: 'XXXXXXXXXXXXXXXPPPSPPHHHHSPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 13: 'XXXXXXXXXXXXXXXXXXPPPSTTSSPPEESSPSSPHHHHTTSSPSSSPPPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 14: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXPPTTEETTTTEEPTTPSPSSSTTEETTEEPPTTPEEEPPPTTPPPEEPPSSPSSPPXXXXX-',\n 15: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPSPBSSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 16: 'XXXXXXXXXXXXXXXPPPSPPHHHHSSSEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 17: 'XXXXXXXXXXXXXXXPPPSPPTTTTSPSEESSPSSPHHHHHTTBPSSSPBPPHHHHTPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 18: 'XXXXXXXXXXXXXXXXXXPPPTTSSSPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 19: 'XXXXXXXXXXXXXXXPPPSSPGGGGPPSPPTTPSSPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 20: '------------------PPPTTTTPSSEETTPTTPHHHHGGGBPTTPPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 21: 'XXXXXXXXXXXXXXXXXXPPPSTTSSPPEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 22: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 23: '-XXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 24: '-XXXXXXXXXXXXXXPPPSPPTTTSSPPPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 25: '------------------PPPHHHHPPPEETTPSSPHHHHGGGBPTTPPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPPBPP',\n 26: 'XXXXXXXXXXXXXXXXXXPPPSGGGPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 27: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSPPSSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 28: 'XXXXXXXXXXXXXXXPPPSPPHHHHSPSEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 29: '-XXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHHTTBPSSSPBPPHHHHPPPTTHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 30: 'XXXXXXXXXXXXXXXXXXPPPSTTSPPSPPSSPTTPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 31: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEETTPTTPHHHHGGGBPSSSPBPPHHHHTPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 32: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSPPSSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 33: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPSEESSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 34: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSPPTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 35: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPSSPPTTGGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 36: '------------------PPPTTTTPPPEETTPSSPHHHHGGGBPTTSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 37: 'XXXXXXXXXXXXXXXXXXPPPSTTSPPSPPSSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 38: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPSPBTTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 39: '-XXXXXXXXXXXXXXPPPSPPTTTSSPSEETTPTTPHHHHGGGBPSSSSBPPHHHHPPPTTHHHHHHHHHHHHHHHTSSPSPPPBPPP',\n 40: 'XXXXXXXXXXXXXXXXXXPPPTTTSSPPPBTTPTTPHHHHGGGSPSSSPPPPTTTSPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 41: 'XXXXXXXXXXXXXXXPPPSPPTTTSSPPEETTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 42: 
'XXXXXXXXXXXXXXXPPPSPPHHHHPPSEETTPTTPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPEEPP',\n 43: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 44: 'XXXXXXXXXXXXXXXXXXPPPTTTTSPPPBTTPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHHTSSPSPPPBPPP',\n 45: 'XXXXXXXXXXXXXXXXXXPPPGGGSPPSPPSSPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 46: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEESSPTTPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 47: 'XXXXXXXXXXXXXXXPPPSSPHHHHSPSPPSSPSSPHHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPBPPP',\n 48: 'XXXXXXXXXXXXXXXXXXPPPTTTSPPSEETTPSSPHHHHGGGBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP',\n 49: 'XXXXXXXXXXXXXXXXXPPPPHHHHPPSEETTPSSPPHHHHTTBPSSSPBPPHHHHPPPHHHHHHHHHHHHHHHHTTSSPSPPPEEPP'},\n 'PDB_CHAIN': {\n 0: '4DV6_R', 1: '4LFB_R', 2: '3C05_B', 3: '2UU9_R',\n 4: '4K0K_R', 5: '4DR6_R', 6: '4DR3_R', 7: '4NXM_R',\n 8: '4LF7_R', 9: '2UUB_R', 10: '4LF5_R', 11: '4JI4_R',\n 12: '4LF9_R', 13: '4JI8_R', 14: '3C05_D', 15: '4JI3_R',\n 16: '4DR4_R', 17: '4LFC_R', 18: '4JI6_R', 19: '4JI1_R',\n 20: '4JYA_R', 21: '4DV1_R', 22: '4OX9_R', 23: '2UUA_R',\n 24: '2UUC_R', 25: '4JV5_R', 26: '4JI2_R', 27: '4DV4_R',\n 28: '4DR5_R', 29: '2UXB_R', 30: '4DUZ_R', 31: '4DUY_R',\n 32: '4DV7_R', 33: '4DV5_R', 34: '4DV3_R', 35: '4LF6_R',\n 36: '4KHP_R', 37: '4DV0_R', 38: '4LF4_R', 39: '2UXD_R',\n 40: '4JI5_R', 41: '4LF8_R', 42: '4DR2_R', 43: '4DV2_R',\n 44: '4JI7_R', 45: '4DR1_R', 46: '4JI0_R', 47: '4LFA_R',\n 48: '4NXN_R', 49: '4DR7_R'}\n }\n\n create_intervals_uni_df = {\n 'SP_PRIMARY': {0: 'Q5SLQ0', 1: 'Q805F6'},\n 'STRUCT': {\n 0: 'XXXXXXXXXXXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',\n 1: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXXXX-'}\n }\n\n create_intervals_expected = {\n 'SP_PRIMARY': {15401: 'Q805F6', 10068: 'Q5SLQ0'},\n 'STRUCT': {\n 15401: '----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------XXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXXXX-',\n 10068: 'XXXXXXXXXXXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'},\n 'MISSING': {\n 15401: [['conserved', (418, 421)], ['contained', (477, 482)]],\n 10068: [['contained', (0, 18)]]}\n }\n\n\nclass ScoresData:\n uni_df = {\n 'SP_PRIMARY': {\n 0: 'P30615',\n 139: 'P62805',\n 102: 'Q8KRK5'\n },\n\n 'STRUCT': {\n 0: 
'XOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',\n 139: 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOXXXXXXXXXXX',\n 102: '----------XXXXXXXXXOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'\n },\n\n 'MISSING': {\n 0: [\n ['conserved', (0, 1)],\n ['conflict', (95, 97)]\n ],\n 139: [\n ['overlap', (0, 28)],\n ['conflict', (92, 103)]\n ],\n 102: [\n ['conserved', (10, 19)]\n ]\n }\n }\n\n create_scores_dict_expected = {\n 'iup_short': [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 1, 1, 1],\n\n 'disordp_rna': [\n 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n\n 'esp_xray': [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0],\n\n 'disordp_dna': [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\n 'dynamine': [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 1, 1, 1, 1, 1, 1],\n\n 'anchor_def': [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0],\n\n 'disordp_pro': [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ],\n\n 'morfpred': [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0\n ]\n }\n\n test_predictions_expected = {\n 'anchor_def': {\n '-': 0.0,\n 'contained': 0.0,\n 'overlap': 0.0,\n 'discarded': 0.0,\n 'conserved': 0.0,\n 'O': 0.0,\n 'X': 0.0,\n 'conflict': 0.0},\n\n 'disordp_rna': {\n '-': 0.0,\n 'contained': 0.0,\n 'overlap': 18.0,\n 'discarded': 0.0,\n 'conserved': 0.0,\n 'O': 41.0,\n 'X': 20.0,\n 'conflict': 2.0},\n\n 'esp_xray': {\n '-': 10.0,\n 'contained': 0.0,\n 'overlap': 20.0,\n 'discarded': 0.0,\n 'conserved': 9.0,\n 'O': 10.0,\n 'X': 29.0,\n 'conflict': 0.0},\n\n 'morfpred': {\n '-': 0.0,\n 'contained': 0.0,\n 'overlap': 3.0,\n 'discarded': 0.0,\n 'conserved': 0.0,\n 'O': 3.0,\n 'X': 3.0,\n 'conflict': 0.0},\n\n 'iup_short': {\n '-': 10.0,\n 'contained': 0.0,\n 'overlap': 28.0,\n 'discarded': 0.0,\n 'conserved': 9.0,\n 'O': 6.0,\n 'X': 41.0,\n 'conflict': 4.0},\n\n 'disordp_dna': {\n '-': 0.0,\n 'contained': 0.0,\n 'overlap': 27.0,\n 'discarded': 0.0,\n 'conserved': 1.0,\n 'O': 9.0,\n 'X': 28.0,\n 'conflict': 0.0},\n\n 'total': {\n '-': 10.0,\n 'contained': 0.0,\n 'overlap': 28.0,\n 'discarded': 0.0,\n 'conserved': 9.0,\n 'O': 106.0,\n 'X': 48.0,\n 'conflict': 11.0},\n\n 'disordp_pro': {\n '-': 0.0,\n 'contained': 0.0,\n 'overlap': 0.0,\n 'discarded': 0.0,\n 'conserved': 0.0,\n 'O': 0.0,\n 'X': 0.0,\n 'conflict': 0.0},\n\n 'dynamine': {\n '-': 10.0,\n 'contained': 0.0,\n 'overlap': 17.0,\n 'discarded': 0.0,\n 'conserved': 9.0,\n 'O': 9.0,\n 'X': 32.0,\n 'conflict': 6.0}\n }\n\n test_fill_data_expected = {\n 'esp_xray-iup_short': {\n 'conserved': 1.0,\n 'contained': 0.0,\n 'conflict': 0.63636363636363635,\n 'overlap': 0.7142857142857143},\n\n 'dynamine-esp_xray': {\n 'conserved': 1.0,\n 'contained': 0.0,\n 'conflict': 0.45454545454545453,\n 'overlap': 0.8928571428571429},\n\n 'iup_short-dynamine': {\n 'conserved': 1.0,\n 'contained': 0.0,\n 'conflict': 0.81818181818181823,\n 'overlap': 0.6071428571428571}\n }\n\n\nclass UniData:\n P00720 = \"\"\"\\\n>sp|P00720|ENLYS_BPT4 Endolysin OS=Enterobacteria phage T4 GN=E PE=1 SV=2\nMNIFEMLRIDERLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNCNGVITK\nDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRCALINMVFQMGETGVAGFTNSLRM\nLQQKRWDEAAVNLAKSIWYNQTPNRAKRVITTFRTGTWDAYKNL\n\"\"\"\n\n P02185 = \"\"\"\\\n>sp|P02185|MYG_PHYCD Myoglobin OS=Physeter catodon GN=MB PE=1 SV=2\nMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASE\nDLKKHGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRH\nPGDFGADAQGAMNKALELFRKDIAAKYKELGYQG\n\"\"\"\n\n\nclass TsvData:\n pdb_seq_tsv_valid = \"\"\"\\\n\\tPDB\\tCHAIN\\tSP_PRIMARY\\tRES_BEG\\tRES_END\\tSP_BEG\\tSP_END\\tPDB_SEQ\n0\\t101M\\tA\\tP02185\\t1\\t154\\t1\\t154\\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRVKHLKTEAEMKASEDLKKHGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG\n1\\t102L\\tA\\tP00720\\t1\\t40\\t1\\t40\\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN\n2\\t102L\\tA\\tP00720\\t42\\t165\\t41\\t164\\tAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL\n3\\t102M\\tA\\tP02185\\t1\\t154\\t1\\t154\\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASEDLKKAGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG\n4\\t103L\\tA\\tP00720\\t1\\t40\\t1\\t40\\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN\n\"\"\"\n\n # github #25 Need one or two examples of obsolete proteins in pdb_seq.tsv\n # TODO: Replace entries 5 and 6 below with real 
ones.\n pdb_seq_tsv_with_obs = \"\"\"\\\n\\tPDB\\tCHAIN\\tSP_PRIMARY\\tRES_BEG\\tRES_END\\tSP_BEG\\tSP_END\\tPDB_SEQ\n0\\t101M\\tA\\tP02185\\t1\\t154\\t1\\t154\\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRVKHLKTEAEMKASEDLKKHGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG\n1\\t102L\\tA\\tP00720\\t1\\t40\\t1\\t40\\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN\n2\\t102L\\tA\\tP00720\\t42\\t165\\t41\\t164\\tAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDAVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL\n3\\t102M\\tA\\tP02185\\t1\\t154\\t1\\t154\\tMVLSEGEWQLVLHVWAKVEADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASEDLKKAGVTVLTALGAILKKKGHHEAELKPLAQSHATKHKIPIKYLEFISEAIIHVLHSRHPGNFGADAQGAMNKALELFRKDIAAKYKELGYQG\n4\\t103L\\tA\\tP00720\\t1\\t40\\t1\\t40\\tMNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLN\n5\\t104M\\tA\\tP45678\\t1\\t40\\t1\\t50\\tNOTAVALIDPROTEINORSEQUENCE\n6\\t104L\\tA\\tP45678\\t1\\t40\\t1\\t60\\tNOTAVALIDPROTEINORSEQUENCE\n\"\"\"\n","repo_name":"shellydeforte/PDB","sub_path":"pdb/tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":41341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"25083898066","text":"def find_prio_val(common_letter):\n # 'A' = 65\n # 'a' = 97\n if common_letter.isupper():\n val = ord(common_letter) - 64 + 26\n else:\n val = ord(common_letter) - 96\n\n return val\n \n\n# read in data\n# save in list\ndata = []\nwith open(\"./Python/3/input.txt\") as f:\n for line in f.readlines():\n data.append(line.strip())\n\npriority_sum = 0\n# find shared values\nfor backpack in data:\n # split string in half\n one, two = backpack[:int(len(backpack)/2)], backpack[int(len(backpack)/2):]\n\n # convert to set\n one, two = set(one), set(two)\n\n # perform set intersection\n common_letter = str(one.intersection(two))[2] # {'X'} -> string -> middle letter is target\n\n # find priority value\n priority_sum += find_prio_val(common_letter)\n \n\nprint(f\"Sum of priority values: {priority_sum}\")\n\n# part two:\n\ndata_2 = []\n# group data in groups of three\nfor i, backpack in enumerate(data):\n if i % 3 == 0:\n data_2.append([])\n data_2[-1].append(backpack)\n\npriority_sum_2 = 0\nfor group in data_2:\n # convert all backpacks to sets\n # perform set intersection on all of them\n one, two, three = group\n one, two, three = set(one), set(two), set(three)\n\n common_letter = str(one.intersection(two.intersection(three)))[2]\n\n # add priority value\n priority_sum_2 += find_prio_val(common_letter)\n\nprint(f\"Sum of priority values (part 2): {priority_sum_2}\")","repo_name":"VaradK62442/AoC2022","sub_path":"Python/3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"4496053007","text":"from __future__ import print_function\n# -----------------------------------------------------------------------\n# This is an example illustrating how to use customview in Python\n# The sample will allow you to open an assembly file and display it in color\n# (c) Hex-Rays\n#\n\nimport os\n\nimport ida_idaapi\nimport ida_kernwin\nimport ida_lines\nimport idautils\n\n# ----------------------------------------------------------------------\nclass asm_colorizer_t(object):\n def is_id(self, ch):\n return ch == '_' or ch.isalpha() or '0' <= ch <= '9'\n\n def get_identifier(self, line, x, e):\n i = x\n is_digit = line[i].isdigit()\n while i < e:\n ch = line[i]\n if not self.is_id(ch):\n if ch != '.' or not is_digit:\n break\n i += 1\n return (i, line[x:i])\n\n def get_quoted_string(self, line, x, e):\n quote = line[x]\n i = x + 1\n while i < e:\n ch = line[i]\n if ch == '\\\\' and line[i+1] == quote:\n i += 1\n elif ch == quote:\n i += 1 # also take the quote\n break\n i += 1\n return (i, line[x:i])\n\n def colorize(self, lines):\n for line in lines:\n line = line.rstrip()\n if not line:\n self.add_line()\n continue\n x = 0\n e = len(line)\n s = \"\"\n while x < e:\n ch = line[x]\n # String?\n if ch == '\"' or ch == \"'\":\n x, w = self.get_quoted_string(line, x, e)\n s += self.as_string(w)\n # Tab?\n elif ch == '\\t':\n s += ' ' * 4\n x += 1\n # Comment?\n elif ch == ';':\n s += self.as_comment(line[x:])\n # Done with this line\n break\n elif ch == '.' and x + 1 < e:\n x, w = self.get_identifier(line, x + 1, e)\n s += self.as_directive(ch + w)\n # Identifiers?\n elif self.is_id(ch):\n x, w = self.get_identifier(line, x, e)\n # Number?\n if ch.isdigit():\n s += self.as_num(w)\n # Other identifier\n else:\n s += self.as_id(w)\n # Output as is\n else:\n s += ch\n x += 1\n self.add_line(s)\n\n\nclass base_asmview_ah_t(ida_kernwin.action_handler_t):\n def __init__(self, obj):\n ida_kernwin.action_handler_t.__init__(self)\n self.obj = obj\n\n def update(self, ctx):\n if self.obj.view and self.obj.view.GetWidget() == ctx.widget:\n return ida_kernwin.AST_ENABLE_FOR_WIDGET\n else:\n return ida_kernwin.AST_DISABLE_FOR_WIDGET\n\n\nclass refresh_ah_t(base_asmview_ah_t):\n def activate(self, ctx):\n self.obj.view.reload_file()\n print(\"Reloaded\")\n\n\nclass close_ah_t(base_asmview_ah_t):\n def activate(self, ctx):\n self.obj.view.Close()\n print(\"Closed\")\n\n\n# -----------------------------------------------------------------------\nclass asmview_t(ida_kernwin.simplecustviewer_t, asm_colorizer_t):\n def Create(self, fn):\n # Create the customview\n if not ida_kernwin.simplecustviewer_t.Create(\n self,\n \"Viewing file - %s\" % os.path.basename(fn)):\n return False\n\n self.instruction_list = idautils.GetInstructionList()\n self.instruction_list.extend([\"ret\"])\n self.register_list = idautils.GetRegisterList()\n self.register_list.extend([\"eax\", \"ebx\", \"ecx\", \"edx\", \"edi\", \"esi\", \"ebp\", \"esp\"])\n\n self.fn = fn\n if not self.reload_file():\n return False\n\n return True\n\n def reload_file(self):\n if not self.colorize_file(self.fn):\n self.Close()\n return False\n return True\n\n def colorize_file(self, fn):\n try:\n f = open(fn, \"r\")\n lines = f.readlines()\n f.close()\n self.ClearLines()\n self.colorize(lines)\n return True\n except:\n return False\n\n def add_line(self, s=None):\n if not s:\n s = \"\"\n self.AddLine(s)\n\n def as_comment(self, s):\n return ida_lines.COLSTR(s, ida_lines.SCOLOR_RPTCMT)\n\n def as_id(self, s):\n t = s.lower()\n if t 
in self.register_list:\n return ida_lines.COLSTR(s, ida_lines.SCOLOR_REG)\n elif t in self.instruction_list:\n return ida_lines.COLSTR(s, ida_lines.SCOLOR_INSN)\n else:\n return s\n\n def as_string(self, s):\n return ida_lines.COLSTR(s, ida_lines.SCOLOR_STRING)\n\n def as_num(self, s):\n return ida_lines.COLSTR(s, ida_lines.SCOLOR_NUMBER)\n\n def as_directive(self, s):\n return ida_lines.COLSTR(s, ida_lines.SCOLOR_KEYWORD)\n\n def OnKeydown(self, vkey, shift):\n \"\"\"\n User pressed a key\n @param vkey: Virtual key code\n @param shift: Shift flag\n @return Boolean. True if you handled the event\n \"\"\"\n # ESCAPE\n if vkey == 27:\n self.Close()\n elif vkey == ord('H'):\n lineno = self.GetLineNo()\n if lineno is not None:\n line, fg, bg = self.GetLine(lineno)\n if line and line[0] != ida_lines.SCOLOR_INV:\n s = ida_lines.SCOLOR_INV + line + ida_lines.SCOLOR_INV\n self.EditLine(lineno, s, fg, bg)\n self.Refresh()\n elif vkey == ord('C'):\n self.ClearLines()\n self.Refresh()\n elif vkey == ord('S'):\n print(\"Selection (x1, y1, x2, y2) = \", self.GetSelection())\n elif vkey == ord('I'):\n print(\"Position (line, x, y) = \", self.GetPos(mouse = 0))\n else:\n return False\n return True\n\n# -----------------------------------------------------------------------\nACTNAME_REFRESH = \"asmview_t::refresh\"\nACTNAME_CLOSE = \"asmview_t::close\"\n\nclass asmviewplg(ida_idaapi.plugin_t):\n flags = ida_idaapi.PLUGIN_KEEP\n comment = \"ASM viewer\"\n help = \"This is help\"\n wanted_name = \"ASM file viewer\"\n wanted_hotkey = \"Alt-F8\"\n def __init__(self):\n self.view = None\n\n def init(self):\n # Register actions\n ida_kernwin.register_action(\n ida_kernwin.action_desc_t(\n ACTNAME_REFRESH, \"Refresh\", refresh_ah_t(self)))\n ida_kernwin.register_action(\n ida_kernwin.action_desc_t(\n ACTNAME_CLOSE, \"Close\", close_ah_t(self)))\n return ida_idaapi.PLUGIN_KEEP\n\n def run(self, arg):\n if self.view:\n self.view.Close()\n fn = ida_kernwin.ask_file(0, \"*.asm\", \"Select ASM file to view\")\n if not fn:\n return\n self.view = asmview_t()\n if not self.view.Create(fn):\n return\n self.view.Show()\n widget = self.view.GetWidget()\n\n # Attach actions to this widget's popup menu\n ida_kernwin.attach_action_to_popup(widget, None, ACTNAME_REFRESH)\n ida_kernwin.attach_action_to_popup(widget, None, ACTNAME_CLOSE)\n\n def term(self):\n if self.view:\n self.view.Close()\n\ndef PLUGIN_ENTRY():\n return asmviewplg()\n","repo_name":"idapython/src","sub_path":"Scripts/AsmViewer.py","file_name":"AsmViewer.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","stars":1299,"dataset":"github-code","pt":"25"}
+{"seq_id":"72342967746","text":"#Memoization\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n \n \n def helper(m,i,j,dp):\n \n if i==0 and j==0:\n return m[i][j]\n \n if i<0 or j<0:\n return float(\"inf\")\n if dp[i][j] != -1:\n return dp[i][j]\n top = m[i][j]+helper(m,i-1,j,dp)\n left = m[i][j]+helper(m,i,j-1,dp)\n \n dp[i][j] = min(top,left)\n return dp[i][j]\n \n m = len(grid)\n n = len(grid[0])\n dp = [[-1 for x in range(n)]for y in range(m)]\n ans = helper(grid,m-1,n-1,dp)\n return ans\n #Bottom up\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n m = len(grid)\n n = len(grid[0])\n dp = [[float(\"inf\") for x in range(n)]for y in range(m)]\n \n for i in range(m):\n for j in range(n):\n if i==0 and j==0:\n dp[i][j] = grid[i][j]\n else:\n dp[i][j] = min(grid[i][j]+dp[i-1][j],grid[i][j]+dp[i][j-1])\n \n return dp[-1][-1]\n","repo_name":"fahadahasmi/StriverSdeChallenge","sub_path":"MinimumPathSum.py","file_name":"MinimumPathSum.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"10757202858","text":"# fmt: off\nimport sys\n\npackage_path = '/Users/maxwuerfek/code/diss/spike_corr/build/lib.macosx-11.1-arm64-cpython-38'\nsys.path.append(package_path)\n\nimport STTC\nimport numpy as np\n\n\nst1 = np.array([2.1, 6, 10])\nst2 = np.array([1, 2, 2.2, 5])\nst3 = np.array([2, 3, 6.4, 10.8])\ntime = np.array([0, 11])\ndt = 0.5\n\nf = STTC.sttc(st1, st2, dt, time)\nprint(f) # 0.1550688802543270?\n\nsts = [st1, st2, st3]\nm = STTC.tiling(sts, dt, time)\n\nprint(m)\n","repo_name":"maxwrf/diss","sub_path":"old/test_sttc_C.py","file_name":"test_sttc_C.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"10077698162","text":"# -*-coding:utf-8 -*-\n# @Time :2019/9/610:22\n# @Author : liupengrui\n# @FileName :替换空格.py\n\n\ndef replaceSpace(s):\n # write code here\n count = 0\n for i in range(len(s)):\n if s[i] == ' ':\n count = count + 1\n p2 = count * 3 + len(s)\n p1 = len(s)\n ss = ''\n for j in range(len(s) - 1, -1, -1):\n if s[j] != ' ':\n ss = s[j] + ss\n print(ss)\n p1 = p1 - 1\n p2 = p2 - 1\n else:\n if p1 == p2:\n ss = s[j] + ss\n print('ii')\n else:\n ss = '%20' + ss\n p2 = p2 - 3\n p1 = p1 - 1\n print(ss)\n\n\nreplaceSpace('hello world gt')\n","repo_name":"FanQiHang/Algorithm","sub_path":"Algorithmic Questions and Solutions/剑指offer/替换空格.py","file_name":"替换空格.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"19764527981","text":"# class Solution(object):\n# def maxProduct(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: int\n# \"\"\"\n# if len(nums)==1:\n# return nums[0]\n#\n# newnum=[]\n# mul=1\n# l=0\n# zero=0\n# while l0:\n# mul*=nums[l]\n# l+=1\n# flag=1\n# if flag:\n# newnum.append(mul)\n# mul=1\n# if lmaxm:\n maxm=a[i]\n return maxm\n\n\n\na=Solution()\ntest= [2,3,-2,4]\nprint(a.maxProduct(test))\ntest=[-2,0,-1]\nprint(a.maxProduct(test))\ntest=[3,-1,4]\nprint(a.maxProduct(test))\n\n","repo_name":"saleed/LeetCode","sub_path":"152.py","file_name":"152.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"71273455747","text":"#!/usr/bin/env python\nimport contexts\nimport importlib\nimport inspect\nimport random\nimport parser\nfrom django.http import HttpResponse\nfrom django.template import defaultfilters\nfrom django.template.base import Variable\nfrom django.template.base import VariableDoesNotExist\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\n\n# Roles\nTAINT_SOURCE = 1\nTAINT_SANITIZER = 2\nTAINT_SINK = 3\nPARSER = 4\nRESOLVER = 5\n\n# Registry of context sequences\nREGISTRY = {}\nVALUES = {}\n\ndef translate_function(function):\n def wrapper(*args, **kwargs):\n result = function(*args, **kwargs)\n seqs = set()\n for arg in args:\n append_sequences(seqs, arg)\n for arg in kwargs.keys():\n append_sequences(seqs, arg)\n for arg in kwargs.values():\n append_sequences(seqs, arg)\n\n if seqs:\n result = translate(result, seqs)\n\n return result\n return wrapper\n\n\nord = translate_function(ord)\nchr = translate_function(chr)\n\n# Harmless methods\nTAINT_FREE_METHODS = set([\n '__cmp__',\n '__eq__',\n '__float__',\n '__getattr__',\n '__getattribute__',\n '__init__',\n '__int__',\n '__new__',\n '__nonzero__',\n '__reduce__',\n '__reduce_ex__',\n '__str__',\n '__unicode__',\n])\n\n\ndef get_methods_to_patch(prototype):\n methods = set(prototype.__dict__.keys())\n return methods - TAINT_FREE_METHODS\n\n\ndef translate_method(method):\n def wrapper(self, *args, **kwargs):\n result = method(self, *args, **kwargs)\n seqs = set()\n for arg in args:\n append_sequences(seqs, arg)\n for arg in kwargs.keys():\n append_sequences(seqs, arg)\n for arg in kwargs.values():\n append_sequences(seqs, arg)\n\n seqs.update(self.sequences)\n return translate(result, seqs)\n return wrapper\n\n\ndef is_tainted(obj):\n if not hasattr(obj, 'sequences'):\n return False\n return bool(obj.sequences)\n\n\ndef create_extension(prototype):\n if hasattr(prototype, \"sequences\"):\n return prototype\n\n methods = get_methods_to_patch(prototype)\n class result(prototype):\n def __new__(cl, *args, **kwargs):\n self = super(result, cl).__new__(cl, *args, **kwargs)\n self.sequences = set()\n\n for arg in args:\n append_sequences(self.sequences, arg)\n for arg in kwargs.keys():\n append_sequences(self.sequences, arg)\n for arg in kwargs.values():\n append_sequences(self.sequences, arg)\n\n return self\n\n def __reduce__(self):\n return (prototype, (prototype(self), ))\n\n\n for name, value in [(m, prototype.__dict__[m]) for m in methods]:\n if inspect.ismethod(value) or inspect.ismethoddescriptor(value):\n setattr(result, name, translate_method(value))\n\n #unicode\n if prototype == unicode:\n setattr(result, '__rmod__', lambda self, other: result.__mod__(result(other), self))\n\n #string\n if '__add__' in methods and '__radd__' not in methods:\n setattr(result, '__radd__', lambda self, other: result.__add__(result(other), self))\n\n return result\n\n\ndef extend(obj, sequences):\n current_type = type(obj)\n superclass = create_extension(current_type)\n res = superclass(obj)\n res.sequences.update(sequences)\n return res\n\n\ndef translate(obj, sequences):\n if isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set):\n prototype = type(obj)\n return prototype(translate(k, sequences) for k in obj)\n elif isinstance(obj, dict):\n prototype = type(obj)\n return prototype((translate(k, sequences), translate(v, sequences)) for k, v in obj.iteritems())\n elif isinstance(obj, bool):\n return obj\n elif isinstance(obj, str) or isinstance(obj, 
unicode) or isinstance(obj, int) or isinstance(obj, float):\n return extend(obj, sequences)\n else:\n return obj\n\n\ndef create_placeholder(arg, position):\n seqs = get_sequences(arg)\n if not seqs:\n return \"\"\n result = str()\n for i in range(0, 16):\n result += chr(random.randint(97, 122))\n REGISTRY[result] = seqs\n VALUES[result] = position\n return result\n\n\ndef duplicate_placeholder(val):\n result = str()\n for i in range(0, 16):\n result += chr(random.randint(97, 122))\n REGISTRY[result] = REGISTRY[val]\n VALUES[result] = VALUES[val]\n return result\n\n\ndef make_source(obj):\n return translate(obj, set(['']))\n\n\ndef sanitize(obj, sequences, contexts):\n if not sequences:\n return obj\n newseqs = set()\n for seq in sequences:\n for ctx in contexts:\n newseqs.add(seq + ctx)\n return translate(obj, newseqs)\n\n\ndef append_sequences(dst, src):\n if not hasattr(src, 'sequences'):\n return\n dst.update(src.sequences)\n\n\ndef get_sequences(*args, **kwargs):\n seqs = set()\n for arg in args:\n append_sequences(seqs, arg)\n for arg in kwargs.keys():\n append_sequences(seqs, arg)\n for arg in kwargs.values():\n append_sequences(seqs, arg)\n return seqs\n\n\ndef resolve(self, context, ignore_failures=False):\n if isinstance(self.var, Variable):\n try:\n from datetime import datetime\n start = datetime.now()\n obj = self.var.resolve(context)\n end = datetime.now()\n return end - start\n except VariableDoesNotExist:\n if str(self.var).find('request.GET') == 0:\n with open(\"/home/user/Desktop/get_args.txt\", \"a\") as outfile:\n outfile.write(str(self.var)[len(\"request.GET.\"):] + \"\\n\")\n\n\nfrom datetime import timedelta\nparser_us = timedelta(seconds=0)\ntaint_us = timedelta(seconds=0)\njs_us = timedelta(seconds=0)\ncss_us = timedelta(seconds=0)\nuri_us = timedelta(seconds=0)\n\n\ndef get_wrapper(function, role, contexts=None):\n def wrapper(*args, **kwargs):\n from datetime import datetime\n start = datetime.now()\n subtraction = None\n if role == TAINT_SANITIZER:\n backup_seqs = get_sequences(*args, **kwargs)\n elif role == RESOLVER:\n subtraction = resolve(*args, **kwargs)\n elif role == TAINT_SINK:\n if str(type(args[0])) == \"<class 'django.template.debug.DebugVariableNode'>\" or str(type(args[0])) == \"<class 'django.template.base.VariableNode'>\":\n position = unicode(args[0].origin) + '@' + unicode(args[0].lineno) + '@' + unicode(args[0].order) + '@' + unicode(args[0].filter_expression)\n else:\n import traceback\n position = ''\n for line in traceback.format_stack():\n position += line.strip()\n position = position.replace(\"\\n\", \"\")\n end = datetime.now()\n diff = end - start\n if subtraction is not None:\n diff -= subtraction\n res = function(*args, **kwargs)\n start = datetime.now()\n if role == TAINT_SOURCE:\n ret = make_source(res)\n elif role == TAINT_SANITIZER:\n ret = sanitize(res, backup_seqs, contexts)\n elif role == TAINT_SINK:\n if position.find(\"runtime/env/lib/python2.7/site-packages/django/template/base.py\") == -1:\n ret = create_placeholder(res, position) + unicode(res)\n else:\n ret = res\n elif role == PARSER:\n ret = library_handler(res)\n elif role == RESOLVER:\n ret = res\n end = datetime.now()\n diff += end - start\n if role == PARSER:\n global parser_us\n parser_us += diff\n else:\n global taint_us\n taint_us += diff\n return ret\n return wrapper\n\n\ndef patch(function_name, role, contexts=None):\n source, func_name = find_function(function_name)\n function = getattr(source, func_name)\n wrapper = get_wrapper(function, role, contexts)\n setattr(source, func_name, wrapper)\n\n\ndef find_function(function_name):\n pieces = 
function_name.split('.')\n try:\n for i in range(0, len(pieces)):\n subarr = pieces[0:i+1]\n module_name = '.'.join(subarr)\n mod = importlib.import_module(module_name)\n func_name = '.'.join(pieces[i+1:])\n except ImportError:\n pass\n\n if func_name.find('.') == -1:\n return mod, func_name\n else:\n class_obj = getattr(mod, func_name[0:func_name.find('.')])\n return class_obj, func_name[func_name.find('.')+1:]\n\n\ndef cleanup_placeholders(placeholders, content):\n remove = []\n for ph in placeholders:\n new_value = content.replace(ph, '')\n if new_value != content:\n remove.append(ph)\n content = new_value\n for ph in remove:\n del parser.DETECTION[ph]\n del REGISTRY[ph]\n del VALUES[ph]\n return content\n\n\ndef library_handler(response):\n if hasattr(response, 'content'):\n placeholders = parser.parse(response.content)\n response.content = cleanup_placeholders(placeholders, response.content)\n return response\n\ndef safe(text):\n seqs = get_sequences(text)\n if seqs:\n return translate(mark_safe(text), seqs)\n else:\n return mark_safe(text)\n\n\ndef patchdjango():\n patch('django.http.request.QueryDict.get', TAINT_SOURCE)\n patch('django.http.request.QueryDict.__getitem__', TAINT_SOURCE)\n patch('django.db.models.sql.compiler.SQLCompiler.patch', TAINT_SOURCE)\n patch(\n 'django.utils.html.escape',\n TAINT_SANITIZER,\n set([contexts.HTML_TEXT, contexts.HTML_QUOT_ATTR, contexts.HTML_APOS_ATTR])\n )\n patch(\n 'django.utils.html.escapejs',\n TAINT_SANITIZER,\n set([contexts.HTML_JS_DATA, contexts.HTML_QUOT_JS, contexts.HTML_APOS_JS])\n )\n patch(\n 'urllib.urlencode',\n TAINT_SANITIZER,\n set([contexts.HTML_QUOT_URI, contexts.HTML_APOS_URI, contexts.CSS_QUOT_URI, contexts.CSS_APOS_URI])\n )\n patch(\n 'django.utils.six.moves.urllib.parse.urlencode',\n TAINT_SANITIZER,\n set([contexts.HTML_QUOT_URI, contexts.HTML_APOS_URI, contexts.CSS_QUOT_URI, contexts.CSS_APOS_URI])\n )\n patch(\n 'django.utils.html.conditional_escape',\n TAINT_SINK\n )\n defaultfilters.register.filter(name='safe', filter_func=stringfilter(safe), is_safe=True)\n patch(\n 'django.template.debug.DebugVariableNode.render',\n TAINT_SINK\n )\n patch(\n 'django.core.handlers.base.BaseHandler.get_response',\n PARSER\n )\n patch(\n 'django.template.base.FilterExpression.resolve',\n RESOLVER\n )\n patch(\n 'django.template.base.VariableNode.render',\n TAINT_SINK\n )\n","repo_name":"disconnect3d/djangochecker","sub_path":"library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":10796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"27817486740","text":"\nfrom datetime import datetime\n\nfrom django.core.validators import RegexValidator\nfrom djongo import models\n\n\nclass Brand(models.Model):\n name = models.CharField(max_length=128)\n short_hand = models.CharField(\n max_length=4,\n unique=True,\n validators=[\n RegexValidator(\n regex='[A-Z]{1,4}',\n message='Short hand not valid',\n code='invalid_short_hand'\n ),\n ]\n )\n create_date = models.DateTimeField(default=datetime.now, null=False)\n\n class Meta:\n db_table = 'brands'\n","repo_name":"LuqmanSahaf/discount-codes","sub_path":"DiscountCodes/DiscountCodes/models/brand.py","file_name":"brand.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"16561926863","text":"class Link:\n empty = ()\n def __init__(self, first, rest=empty):\n assert rest is Link.empty or isinstance(rest, Link)\n self.first = first\n self.rest = rest\n\n def __str__(self):\n if is_empty(self.rest):\n return str(self.first)\n else:\n start = str(self.first)\n start += ' -> '\n return start + str(self.rest) \n\n def __repr__(self):\n if is_empty(self.rest):\n return 'Link({})'.format(self.first)\n else:\n s = 'Link({}, {})'.format(self.first, repr(self.rest))\n return s\n\ndef is_empty(link):\n if link is Link.empty:\n return True\n\ndef extend(a, b):\n if is_empty(a.rest):\n a.rest = b\n return a\n else:\n a.rest = extend(a.rest, b)\n return a\n\ndef reverse_link(a):\n \"\"\"\n >>> a = Link(1, Link(2, Link(3)))\n >>> reverse_link(a)\n Link(3, Link(2, Link(1)))\n \"\"\"\n current = a\n reverse = Link.empty\n while not is_empty(current):\n rest_of_current = current.rest\n current.rest = reverse\n reverse = current\n current = rest_of_current\n return reverse\n\ndef conserve_links(a, b):\n \"\"\"Makes Linked List a share as many Link instances as possible with\n Linked List b.a can use b's i-th Link instance as its i-th Link\n instance if a and b have the same element at position i.\n Should mutate a. b is allowed to be destroyed. Returns the new first\n Link instance of a.\n\n >>> x = Link(1, Link(2, Link(3, Link(4, Link(5, Link(6))))))\n >>> y = Link(1, Link(9, Link(3, Link(4, Link(9, Link(6))))))\n >>> z = conserve_links(x, y)\n >>> curr_x, curr_z = x, z\n >>> while curr_z is not Link.empty:\n ... assert curr_z.first == curr_x.first\n ... curr_x, curr_z = curr_x.rest, curr_z.rest\n >>> assert z == y\n >>> assert z.rest.rest == y.rest.rest\n >>> assert z.rest.rest.rest.rest.rest == y.rest.rest.rest.rest.rest\n >>> assert z.rest.rest.rest.rest.rest == y.rest.rest.rest.rest.rest\n \"\"\"\n if is_empty(a.rest) and is_empty(b.rest):\n if a.first == b.first:\n return b\n else:\n return a\n else:\n if a.first == b.first:\n b.rest = conserve_links(a.rest, b.rest)\n return b\n else:\n a.rest = conserve_links(a.rest, b.rest)\n return a\n\ndef slice_reverse(s, i, j):\n \"\"\"\n >>> s = Link(1, Link(2, Link(3)))\n >>> slice_reverse(s, 1, 2)\n >>> s\n Link(1, Link(2, Link(3)))\n >>> s = Link(1, Link(2, Link(3, Link(4, Link(5)))))\n >>> slice_reverse(s, 2, 4)\n >>> s\n Link(1, Link(2, Link(4, Link(3, Link(5)))))\n \"\"\"\n start = s\n for _ in range(i-1):\n start = start.rest \n reverse = Link.empty\n current = start.rest \n for _ in range(j-i):\n tmp = current.rest\n current.rest = reverse\n reverse = current\n current = tmp\n extend(reverse, current)\n start.rest = reverse\n","repo_name":"WangXin93/my-python-demo","sub_path":"cs61a/exam/ex_prep06/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"}
+{"seq_id":"71277464387","text":"N = int(input())\nS = [input() for _ in range(N)]\n\nS.sort()\n\nmax = 0\nOutput = []\npt = 0\nwhile pt < N:\n st,tmp = S[pt],0\n while pt < N and st == S[pt]:\n pt += 1\n tmp += 1\n if max == tmp:\n Output.append(st)\n if max < tmp:\n max = tmp\n Output.clear()\n Output.append(st)\n\nfor i in range(len(Output)):\n print(Output[i])\n","repo_name":"humancipher/Programming_Contest","sub_path":"Programming_Contest/AtCoder/ABC/ABC_100-199/ABC_150-159/ABC_155/ABC_155_C.py","file_name":"ABC_155_C.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"17471028601","text":"\"\"\" Script for find result of poker game saved in\ntxt file with following structure:\n\n - each line is one game\n - each item is one card\n - first 5 items in line is player_1 hand\n - last 5 items in line is player_2 hand\n\n8C TS KC 9H 4S 7D 2S 5D 3S AC\n5C AD 5D AC 9C 7C 5H 8D TD KS\n3H 7H 6S KC JS QH TD JC 2D 8S\n\n\"\"\"\n\nimport re\n\nPATH = 'C://repositories//files//poker.txt'\ncards_rank = ['A', 'K', 'Q', 'J', 'T', '9', '8',\n '7', '6', '5', '4', '3', '2']\ncards = ''.join(cards_rank)\nsuits = 'HDCS'\n\nfive_cards_set = [cards[i:i + 5] for i in range(len(cards) - 4)]\n\n\ndef c_sf_s(x):\n return r'(%s)' % '|'.join(x(n, i) for n, i in enumerate(five_cards_set))\n\n\ndef royal_flush():\n return r'(A([{c}])K\\2Q\\2J\\2T\\2)'\n\n@c_sf_s\ndef straight_flush(n, i):\n return ''.join('[%s]' % j + (\n '([%s])' % suits if m == 0 else '\\%d' % (n + 2)\n ) for m, j in enumerate(i))\n\n\ndef quads():\n return r'(([{f}])[{c}]\\2[{c}]\\2[{c}]\\2[{c}])'\n\n\ndef flush():\n return r'([{f}]([{c}])[{f}]\\2[{f}]\\2[{f}]\\2[{f}]\\2)'\n\n\n@c_sf_s\ndef straight(n, i):\n return ''.join(j + '[%s]' % suits for j in i)\n\n\ndef three(x):\n return r'(([{f}])[{c}]\\%d[{c}]\\%d[{c}])' % (2 + x, 2 + x)\n\n\ndef pair(x):\n return r'(([{f}])[{c}]\\%d[{c}])' % (2 + x)\n\n\ndef two_pair():\n return r'((?<=[{f}][{c}])%s%s|%s[{f}][{c}]%s|%s%s(?=[{f}][{c}]))' % (\n pair(1), pair(3), pair(5), pair(7), pair(9), pair(11)\n )\n\n\ndef full():\n return r'(%s%s|%s%s)' % (\n pair(1), three(3), three(5), pair(7)\n )\n\n\nhand_ranking = [\n ('0_poker_krolewski', royal_flush()),\n ('1_poker', straight_flush),\n ('2_kareta', quads()),\n ('3_full', full()),\n ('4_kolor', flush()),\n ('5_street', straight),\n ('6_trojka', three(0)),\n ('7_dwie_pary', two_pair()),\n ('8_para', pair(0)),\n ('9_pusta', ''),\n]\nhand_ranking_reg = [i[1].format(f=cards, c=suits) for i in hand_ranking]\n\n\ndef sort_hand(hand):\n hand_sorted = sorted(hand, key=lambda x: cards_rank.index(x[0]))\n return ''.join(hand_sorted)\n\n\ndef find_hand(hand):\n hand_aggr = [None, None, None]\n for i, exp in enumerate(hand_ranking_reg):\n figure = re.search(exp, hand)\n if figure:\n hand_aggr[0] = i\n if i == 7 and len(figure.group()) == 10:\n hand_aggr[1] = ''.join((figure.group(6), figure.group(8)))\n else:\n hand_aggr[1] = figure.group()\n break\n\n hand_aggr[2] = hand\n return hand_aggr\n\n\ndef compare(hand_1, hand_2, result):\n\n def check(v_1, v_2, v_e=False):\n if v_1 < v_2:\n result['gracz_1'] += 1\n elif v_1 > v_2:\n result['gracz_2'] += 1\n else:\n if v_e:\n v_e()\n\n def check_all():\n for i, j in zip(hand_1[2][::2], hand_2[2][::2]):\n i_ord = cards_rank.index(i)\n j_ord = cards_rank.index(j)\n if i_ord < j_ord:\n result['gracz_1'] += 1\n break\n elif i_ord > j_ord:\n result['gracz_2'] += 1\n break\n\n def check_full():\n\n h_1_cards = hand_1[1][::2]\n h_2_cards = hand_2[1][::2]\n h_1_f_three_i = cards_rank.index(re.search(r'.{3}', h_1_cards)[0][0])\n h_2_f_three_i = cards_rank.index(re.search(r'.{3}', h_2_cards)[0][0])\n if h_1_f_three_i < h_2_f_three_i:\n result['gracz_1'] += 1\n elif h_1_f_three_i > h_2_f_three_i:\n result['gracz_2'] += 1\n else:\n h_1_f_pair = cards_rank.index(re.search(r'.{2}', h_1_cards)[0][0])\n h_2_f_pair = cards_rank.index(re.search(r'.{2}', h_2_cards)[0][0])\n check(h_1_f_pair, h_2_f_pair, check_all)\n\n h_1_0 = hand_1[0]\n h_2_0 = hand_2[0]\n if h_1_0 < h_2_0:\n result['gracz_1'] += 1\n elif h_1_0 > h_2_0:\n result['gracz_2'] += 1\n elif h_1_0 == h_2_0:\n if h_1_0 == 9:\n check_all()\n 
elif h_1_0 == 3:\n check_full()\n else:\n h_1_1_0_i = cards_rank.index(hand_1[1][0])\n h_2_1_0_i = cards_rank.index(hand_2[1][0])\n if h_1_1_0_i < h_2_1_0_i:\n result['gracz_1'] += 1\n elif h_1_1_0_i > h_2_1_0_i:\n result['gracz_2'] += 1\n elif h_1_0 == 7:\n h_1_1_3_i = cards_rank.index(hand_1[1][3])\n h_2_1_3_i = cards_rank.index(hand_2[1][3])\n check(h_1_1_3_i, h_2_1_3_i, check_all)\n else:\n check_all()\n return result\n\n\nresult = {'gracz_1': 0, 'gracz_2': 0}\n\nwith open(PATH) as file:\n for line in file:\n hands = line.replace('\\n', '').split(' ')\n hand_1 = find_hand(sort_hand(hands[:5]))\n hand_2 = find_hand(sort_hand(hands[5:]))\n result = compare(hand_1, hand_2, result)\n\nprint(result)\n\n\ndef test_find_pattern():\n\n # poker_krolewski:\n assert find_hand('AHKHQHJHTH')[0] == 0\n\n # poker:\n assert find_hand('KHQHJHTH9H')[0] == 1\n\n # kareta:\n assert find_hand('AHADACASJS')[0] == 2\n assert find_hand('AHJDJCJSJH')[0] == 2\n\n # full:\n assert find_hand('AHADAS2H2D')[0] == 3\n assert find_hand('AHAD2S2H2D')[0] == 3\n\n # kolor:\n assert find_hand('AHQHJHTH9H')[0] == 4\n\n # street:\n assert find_hand('JHTD9C8S7H')[0] == 5\n\n # trojka:\n assert find_hand('AHADAC2SJS')[0] == 6\n assert find_hand('AHKDKCKHJS')[0] == 6\n assert find_hand('AHKDQCQHQS')[0] == 6\n\n # dwie_pary:\n assert find_hand('AHKHKD3H3D')[0] == 7\n assert find_hand('AHADKDKC2S')[0] == 7\n assert find_hand('AHADKSQDQC')[0] == 7\n\n # para:\n assert find_hand('AHKDKDQC2S')[0] == 8\n assert find_hand('AHKDJD3C3S')[0] == 8\n\n # pusta:\n assert find_hand('ASKHQHJH2H')[0] == 9\n\n\ndef test_compare():\n\n result = {'gracz_1': 0, 'gracz_2': 0}\n desired_resp = {'gracz_1': 1, 'gracz_2': 0}\n\n resp = compare(\n [3, 'AHADAS2H2D', 'AHADAS2H2D'],\n [4, 'AHQHJHTH9H', 'AHQHJHTH9H'],\n result.copy()\n )\n assert resp == desired_resp\n\n resp = compare(\n [9, '', 'ASKDJD8H3D'],\n [9, '', 'QD8C7C6C5C'],\n result.copy()\n )\n assert resp == desired_resp\n\n resp = compare(\n [3, 'AHADAS2H2D', 'AHADAS2H2D'],\n [3, 'KHKD2S2H2D', 'KHKD2S2H2D'],\n result.copy()\n )\n assert resp == desired_resp\n\n resp = compare(\n [3, 'AHADKHKDKS', 'AHADKHKDKS'],\n [3, 'ACASTHTDTS', 'ACASTHTDTS'],\n result.copy()\n )\n assert resp == desired_resp\n\n resp = compare(\n [3, 'ACASTHTDTS', 'ACASTHTDTS'],\n [3, 'KCKSTHTDTS', 'KCKSTHTDTS'],\n result.copy()\n )\n assert resp == desired_resp\n\n resp = compare(\n [3, 'AHADKHKDKS', 'AHADKHKDKS'],\n [3, 'AHADKHKDKS', 'AHADKHKDKS'],\n result.copy()\n )\n assert resp == {'gracz_1': 0, 'gracz_2': 0}\n\n resp = compare(\n [8, 'AHKDJS2H2D', 'KHQD5S2H2D'],\n [8, 'KHKD2S2H2D', 'KHKD2S2H2D'],\n result.copy()\n )\n assert resp == desired_resp\n","repo_name":"avpps/python_various","sub_path":"poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}
+{"seq_id":"30014148803","text":"#!/usr/bin/env python\n\nimport re\nfrom subprocess import *\n\ntry:\n # Execute process\n output = Popen(['ls', '-tlr'], stdout=PIPE).communicate()[0]\n print(ouput)\n\n # Parse Results with Regex\n p = re.compile(r'\\s+')\n s = p.split(output)\n s.pop()\n print(s)\n\n # Assign Interesting Elements to new VARs\n d = s[0]\n t = s[1]\n tem = s[3]\n\n # Strip trailing temp value (F|C)\n tem2 = tem[:-1]\n print(\"DATE: \", d, \" Time:\", t, \"TEMPERATURE: \", tem2)\n\nexcept OSError as err1:\n print(\"ERROR: \", err1)\n exit(200)\n","repo_name":"hoodielive/pythonnerd","sub_path":"modules/subprocess_module/pcsensor.py","file_name":"pcsensor.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"}